Merge branch 'drm-next-3.10' of git://people.freedesktop.org/~agd5f/linux into drm...
author Dave Airlie <airlied@redhat.com>
Tue, 16 Apr 2013 22:48:23 +0000 (08:48 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 16 Apr 2013 22:48:23 +0000 (08:48 +1000)
Alex writes:
This is the initial 3.10 pull request for radeon.  The big changes here
are UVD support and proper tiling support for SI.  The rest is
bug fixes.  I hope to have another pull request later in the week with
some new things we've been working on internally.

* 'drm-next-3.10' of git://people.freedesktop.org/~agd5f/linux: (28 commits)
  drm/radeon: Always flush the VM
  drm/radeon: re-enable PTE/PDE packet for set_page on cayman/TN
  drm/radeon: cleanup properly if mmio mapping fails
  drm/radeon/evergreen+: don't enable HPD interrupts on eDP/LVDS
  drm/radeon: add si tile mode array query v3
  drm/radeon: add ring working query
  drm/radeon: handle broken disabled rb mask gracefully
  drm/radeon: add pcie set/get lanes callbacks for newer asics
  drm/radeon: update r600 set/get pcie lane config
  drm/radeon/kms: replace *REG32_PCIE_P with *REG32_PCIE_PORT
  drm/radeon: remove unused blit remnants from si.c
  drm/radeon: add UVD tiling addr config v2
  drm/radeon: init UVD clocks to sane defaults
  drm/radeon: add set_uvd_clocks callback for r7xx v3
  drm/radeon: add set_uvd_clocks callback for SI
  drm/radeon: add set_uvd_clocks callback for evergreen
  drm/radeon: add set_uvd_clocks callback for ON/LN/TN (v4)
  drm/radeon: add radeon_atom_get_clock_dividers helper
  drm/radeon: add pm callback for setting uvd clocks
  drm/radeon: UVD bringup v8
  ...

543 files changed:
Documentation/EDID/1600x1200.S [new file with mode: 0644]
Documentation/EDID/HOWTO.txt
Documentation/networking/ipvs-sysctl.txt
MAINTAINERS
Makefile
arch/arc/include/asm/dma-mapping.h
arch/arc/include/asm/elf.h
arch/arc/include/asm/entry.h
arch/arc/include/asm/kgdb.h
arch/arc/include/asm/ptrace.h
arch/arc/include/asm/syscalls.h
arch/arc/include/uapi/asm/ptrace.h
arch/arc/kernel/entry.S
arch/arc/kernel/kgdb.c
arch/arc/kernel/setup.c
arch/arc/kernel/sys.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/dts/armada-370-mirabox.dts
arch/arm/boot/dts/armada-370.dtsi
arch/arm/boot/dts/dbx5x0.dtsi
arch/arm/boot/dts/kirkwood-goflexnet.dts
arch/arm/boot/dts/orion5x.dtsi
arch/arm/boot/dts/tegra20.dtsi
arch/arm/boot/dts/tegra30.dtsi
arch/arm/include/asm/delay.h
arch/arm/include/asm/highmem.h
arch/arm/include/asm/mmu_context.h
arch/arm/include/asm/tlbflush.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/head.S
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/setup.c
arch/arm/kernel/smp.c
arch/arm/kernel/smp_tlb.c
arch/arm/kvm/vgic.c
arch/arm/lib/delay.c
arch/arm/mach-cns3xxx/core.c
arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
arch/arm/mach-ep93xx/include/mach/uncompress.h
arch/arm/mach-imx/common.h
arch/arm/mach-imx/hotplug.c
arch/arm/mach-imx/src.c
arch/arm/mach-kirkwood/guruplug-setup.c
arch/arm/mach-kirkwood/openrd-setup.c
arch/arm/mach-kirkwood/rd88f6281-setup.c
arch/arm/mach-msm/timer.c
arch/arm/mach-mvebu/irq-armada-370-xp.c
arch/arm/mach-mxs/mach-mxs.c
arch/arm/mach-omap1/clock_data.c
arch/arm/mach-omap2/board-2430sdp.c
arch/arm/mach-omap2/board-3430sdp.c
arch/arm/mach-omap2/board-am3517evm.c
arch/arm/mach-omap2/board-cm-t35.c
arch/arm/mach-omap2/board-devkit8000.c
arch/arm/mach-omap2/board-h4.c
arch/arm/mach-omap2/board-igep0020.c
arch/arm/mach-omap2/board-ldp.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-omap3evm.c
arch/arm/mach-omap2/board-omap3stalker.c
arch/arm/mach-omap2/board-overo.c
arch/arm/mach-omap2/cclock44xx_data.c
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/dss-common.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-ux500/board-mop500-sdi.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/board-mop500.h
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/mm/cache-l2x0.c
arch/arm/mm/context.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7.S
arch/arm64/mm/mmu.c
arch/ia64/kernel/process.c
arch/mips/Kconfig
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/bcm63xx/nvram.c
arch/mips/bcm63xx/setup.c
arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/signal.h
arch/mips/kernel/Makefile
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/linux32.c
arch/mips/kernel/proc.c
arch/mips/lib/bitops.c
arch/mips/lib/csum_partial.S
arch/powerpc/kernel/epapr_paravirt.c
arch/powerpc/kernel/exceptions-64s.S
arch/s390/include/asm/pgtable.h
arch/s390/lib/uaccess_pt.c
arch/tile/kernel/setup.c
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/microcode_intel_early.c
arch/x86/lib/usercopy_64.c
arch/x86/xen/mmu.c
block/blk-flush.c
block/partition-generic.c
drivers/acpi/apei/cper.c
drivers/acpi/pci_root.c
drivers/acpi/sleep.c
drivers/base/regmap/regcache-rbtree.c
drivers/base/regmap/regmap.c
drivers/block/Kconfig
drivers/block/aoe/aoecmd.c
drivers/block/cciss.c
drivers/block/loop.c
drivers/block/mg_disk.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/rbd.c
drivers/block/rsxx/Makefile
drivers/block/rsxx/config.c
drivers/block/rsxx/core.c
drivers/block/rsxx/cregs.c
drivers/block/rsxx/dma.c
drivers/block/rsxx/rsxx.h
drivers/block/rsxx/rsxx_cfg.h
drivers/block/rsxx/rsxx_priv.h
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/char/hw_random/core.c
drivers/char/virtio_console.c
drivers/clk/tegra/clk-tegra20.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/compat.h
drivers/crypto/talitos.c
drivers/dma/Kconfig
drivers/dma/dw_dmac.c
drivers/dma/dw_dmac_regs.h
drivers/extcon/extcon-max77693.c
drivers/extcon/extcon-max8997.c
drivers/firmware/Kconfig
drivers/firmware/efivars.c
drivers/gpio/gpio-ich.c
drivers/gpio/gpio-stmpe.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/drm_cache.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid_load.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/mgag200/mgag200_drv.h
drivers/gpu/drm/mgag200/mgag200_fb.c
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drm.h
drivers/gpu/drm/omapdrm/omap_connector.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_encoder.c
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
drivers/gpu/drm/omapdrm/omap_irq.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/qxl/Kconfig [new file with mode: 0644]
drivers/gpu/drm/qxl/Makefile [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_cmd.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_dev.h [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_display.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_draw.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_drv.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_drv.h [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_dumb.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_fb.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_fence.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_gem.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_image.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_ioctl.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_irq.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_kms.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_object.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_object.h [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_release.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_ttm.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hid-quirks.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/ipath/ipath_verbs.c
drivers/infiniband/hw/qib/Kconfig
drivers/infiniband/hw/qib/qib_driver.c
drivers/infiniband/hw/qib/qib_iba6120.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_sd7220.c
drivers/infiniband/hw/qib/qib_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/irq_remapping.c
drivers/media/i2c/m5mols/m5mols_core.c
drivers/media/pci/bt8xx/bttv-driver.c
drivers/media/platform/Kconfig
drivers/media/platform/exynos-gsc/gsc-core.c
drivers/media/platform/s5p-fimc/fimc-core.c
drivers/media/platform/s5p-fimc/fimc-lite-reg.c
drivers/media/platform/s5p-fimc/fimc-lite.c
drivers/media/platform/s5p-fimc/fimc-mdevice.c
drivers/media/platform/s5p-mfc/s5p_mfc.c
drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
drivers/media/radio/radio-ma901.c
drivers/media/rc/Kconfig
drivers/media/v4l2-core/Makefile
drivers/misc/mei/hw-me.c
drivers/misc/mei/init.c
drivers/misc/mei/mei_dev.h
drivers/misc/mei/pci-me.c
drivers/misc/vmw_vmci/vmci_datagram.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/sja1000/Kconfig
drivers/net/can/sja1000/plx_pci.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/sja1000/sja1000.h
drivers/net/ethernet/atheros/atl1e/atl1e.h
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/davicom/dm9000.h
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb_hwmon.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/marvell/sky2.h
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/usb/smsc75xx.c
drivers/net/wireless/ath/ath9k/ar9003_calib.c
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/4965-rs.c
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/rtlwifi/usb.c
drivers/pinctrl/mvebu/pinctrl-mvebu.c
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinconf.h
drivers/pinctrl/pinctrl-abx500.c
drivers/pinctrl/pinmux.c
drivers/s390/block/scm_blk.c
drivers/s390/block/scm_drv.c
drivers/s390/char/tty3270.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/libfc/fc_disc.c
drivers/spi/Kconfig
drivers/staging/comedi/drivers/s626.c
drivers/staging/zcache/Kconfig
drivers/target/target_core_transport.c
drivers/tty/serial/8250/8250.c [deleted file]
drivers/tty/serial/8250/8250_core.c [new file with mode: 0644]
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/8250/Makefile
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/vt/vc_screen.c
drivers/usb/core/hcd.c
drivers/usb/core/usb-acpi.c
drivers/usb/gadget/Kconfig
drivers/usb/host/ehci-sched.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/phy/Kconfig
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/usb-serial.c
drivers/vhost/tcm_vhost.c
drivers/video/fbmem.c
drivers/video/hdmi.c
drivers/video/mxsfb.c
drivers/video/omap/omapfb_main.c
drivers/video/omap2/displays/panel-acx565akm.c
drivers/video/omap2/displays/panel-generic-dpi.c
drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
drivers/video/omap2/displays/panel-n8x0.c
drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
drivers/video/omap2/displays/panel-picodlp.c
drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
drivers/video/omap2/displays/panel-taal.c
drivers/video/omap2/displays/panel-tfp410.c
drivers/video/omap2/displays/panel-tpo-td043mtea1.c
drivers/video/omap2/dss/apply.c
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/dispc.h
drivers/video/omap2/dss/dpi.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/dss.h
drivers/video/omap2/dss/dss_features.c
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/dss/output.c
drivers/video/omap2/dss/rfbi.c
drivers/video/omap2/dss/sdi.c
drivers/video/omap2/dss/venc.c
drivers/video/omap2/omapfb/omapfb-main.c
drivers/video/sh_mobile_lcdcfb.c
drivers/xen/Kconfig
drivers/xen/events.c
drivers/xen/fallback.c
drivers/xen/xen-acpi-processor.c
drivers/xen/xen-pciback/pci_stub.c
firmware/Makefile
firmware/intel/sd7220.fw.ihex [new file with mode: 0644]
firmware/qlogic/sd7220.fw.ihex [deleted file]
fs/block_dev.c
fs/btrfs/ctree.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/qgroup.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/volumes.c
fs/dcache.c
fs/ext4/extents.c
fs/ext4/indirect.c
fs/internal.h
fs/namespace.c
fs/nfs/blocklayout/blocklayoutdm.c
fs/nfs/idmap.c
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfsd/nfs4xdr.c
fs/nfsd/nfscache.c
fs/nfsd/vfs.c
fs/pnode.c
fs/pnode.h
fs/proc/root.c
fs/read_write.c
fs/reiserfs/xattr.c
fs/splice.c
fs/sysfs/dir.c
fs/sysfs/mount.c
fs/ubifs/super.c
include/drm/drm_crtc.h
include/drm/drm_fb_helper.h
include/drm/ttm/ttm_bo_driver.h
include/linux/compat.h
include/linux/debug_locks.h
include/linux/fb.h
include/linux/freezer.h
include/linux/fs_struct.h
include/linux/mfd/max77693-private.h
include/linux/mm.h
include/linux/mman.h
include/linux/mount.h
include/linux/mxsfb.h
include/linux/netdevice.h
include/linux/pm.h
include/linux/scatterlist.h
include/linux/signal.h
include/linux/thermal.h
include/linux/udp.h
include/linux/usb/hcd.h
include/linux/user_namespace.h
include/net/flow_keys.h
include/net/ip_vs.h
include/net/ipip.h
include/scsi/libfc.h
include/uapi/drm/Kbuild
include/uapi/drm/drm.h
include/uapi/drm/drm_mode.h
include/uapi/drm/qxl_drm.h [new file with mode: 0644]
include/uapi/linux/packet_diag.h
include/uapi/linux/unix_diag.h
include/video/omap-panel-data.h [new file with mode: 0644]
include/video/omap-panel-generic-dpi.h [deleted file]
include/video/omap-panel-n8x0.h [deleted file]
include/video/omap-panel-nokia-dsi.h [deleted file]
include/video/omap-panel-picodlp.h [deleted file]
include/video/omap-panel-tfp410.h [deleted file]
include/video/omapdss.h
include/xen/interface/io/blkif.h
include/xen/interface/physdev.h
ipc/mqueue.c
ipc/msg.c
kernel/exit.c
kernel/lockdep.c
kernel/pid_namespace.c
kernel/power/console.c
kernel/time/tick-broadcast.c
kernel/user.c
kernel/user_namespace.c
lib/scatterlist.c
mm/fremap.c
mm/mlock.c
mm/mmap.c
mm/nommu.c
net/8021q/vlan.c
net/bluetooth/sco.c
net/bridge/br_fdb.c
net/core/dev.c
net/core/flow.c
net/core/flow_dissector.c
net/core/rtnetlink.c
net/core/scm.c
net/ipv4/af_inet.c
net/ipv4/ipconfig.c
net/ipv4/netfilter/Kconfig
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ip6_input.c
net/ipv6/netfilter/ip6t_NPT.c
net/ipv6/udp.c
net/irda/af_irda.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/l2tp/l2tp_ppp.c
net/mac80211/iface.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_proto_sctp.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_udplite.c
net/netfilter/nfnetlink_queue_core.c
net/netlink/genetlink.c
net/sched/sch_cbq.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sunrpc/sched.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/vmw_vsock/vsock_addr.c
net/vmw_vsock/vsock_addr.h
net/wireless/core.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/scan.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/wext-sme.c
net/xfrm/xfrm_replay.c
security/yama/yama_lsm.c

diff --git a/Documentation/EDID/1600x1200.S b/Documentation/EDID/1600x1200.S
new file mode 100644 (file)
index 0000000..0ded64c
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+   1600x1200.S: EDID data set for standard 1600x1200 60 Hz monitor
+
+   Copyright (C) 2013 Carsten Emde <C.Emde@osadl.org>
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either version 2
+   of the License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
+*/
+
+/* EDID */
+#define VERSION 1
+#define REVISION 3
+
+/* Display */
+#define CLOCK 162000 /* kHz */
+#define XPIX 1600
+#define YPIX 1200
+#define XY_RATIO XY_RATIO_4_3
+#define XBLANK 560
+#define YBLANK 50
+#define XOFFSET 64
+#define XPULSE 192
+#define YOFFSET (63+1)
+#define YPULSE (63+3)
+#define DPI 72
+#define VFREQ 60 /* Hz */
+#define TIMING_NAME "Linux UXGA"
+#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */
+#define HSYNC_POL 1
+#define VSYNC_POL 1
+#define CRC 0x9d
+
+#include "edid.S"
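(Editorial consistency check, not part of the patch: the pixel clock follows from the timing values above as total horizontal pixels times total vertical lines times refresh rate, i.e. (XPIX + XBLANK) x (YPIX + YBLANK) x VFREQ = (1600 + 560) x (1200 + 50) x 60 Hz = 162,000,000 Hz = 162,000 kHz, which matches CLOCK 162000.)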
index 2d0a8f09475de60152d308b2d3f96254a9fd380e..7146db1d9e8cf1e855b86002e34820dc270665c9 100644 (file)
@@ -18,12 +18,12 @@ CONFIG_DRM_LOAD_EDID_FIRMWARE was introduced. It allows to provide an
 individually prepared or corrected EDID data set in the /lib/firmware
 directory from where it is loaded via the firmware interface. The code
 (see drivers/gpu/drm/drm_edid_load.c) contains built-in data sets for
-commonly used screen resolutions (1024x768, 1280x1024, 1680x1050,
-1920x1080) as binary blobs, but the kernel source tree does not contain
-code to create these data. In order to elucidate the origin of the
-built-in binary EDID blobs and to facilitate the creation of individual
-data for a specific misbehaving monitor, commented sources and a
-Makefile environment are given here.
+commonly used screen resolutions (1024x768, 1280x1024, 1600x1200,
+1680x1050, 1920x1080) as binary blobs, but the kernel source tree does
+not contain code to create these data. In order to elucidate the origin
+of the built-in binary EDID blobs and to facilitate the creation of
+individual data for a specific misbehaving monitor, commented sources
+and a Makefile environment are given here.
 
 To create binary EDID and C source code files from the existing data
 material, simply type "make".
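(Editorial aside, not part of the patch: once built — or when using one of the built-in blobs — a data set such as edid/1600x1200.bin can be selected at boot through the module parameter exposed by drivers/gpu/drm/drm_edid_load.c, along the lines of drm_kms_helper.edid_firmware=edid/1600x1200.bin; treat the exact parameter name as an assumption to verify against the HOWTO in the tree.)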
index f2a2488f1bf33d8290384c4823fccee2b39afe36..9573d0c48c6ea882099edd94ed2dcda94e4fe3dd 100644 (file)
@@ -15,6 +15,13 @@ amemthresh - INTEGER
         enabled and the variable is automatically set to 2, otherwise
         the strategy is disabled and the variable is  set  to 1.
 
+backup_only - BOOLEAN
+       0 - disabled (default)
+       not 0 - enabled
+
+       If set, disable the director function while the server is
+       in backup mode to avoid packet loops for DR/TUN methods.
+
 conntrack - BOOLEAN
        0 - disabled (default)
        not 0 - enabled
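(Editorial aside, not part of the patch: like the other IPVS knobs documented in this file, backup_only is expected to live under /proc/sys/net/ipv4/vs/, so a backup director would typically enable it with something like "sysctl -w net.ipv4.vs.backup_only=1".)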
index 4cf5fd334a06a32cc25fdadff37355b8fc6c397f..836a6183c37f6f6e2ceec36ea72497136fffde4c 100644 (file)
@@ -3242,6 +3242,12 @@ F:       Documentation/firmware_class/
 F:     drivers/base/firmware*.c
 F:     include/linux/firmware.h
 
+FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card)
+M:     Joshua Morris <josh.h.morris@us.ibm.com>
+M:     Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+S:     Maintained
+F:     drivers/block/rsxx/
+
 FLOPPY DRIVER
 M:     Jiri Kosina <jkosina@suse.cz>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
@@ -5059,9 +5065,8 @@ S:        Maintained
 F:     drivers/net/ethernet/marvell/sk*
 
 MARVELL LIBERTAS WIRELESS DRIVER
-M:     Dan Williams <dcbw@redhat.com>
 L:     libertas-dev@lists.infradead.org
-S:     Maintained
+S:     Orphan
 F:     drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
@@ -5563,6 +5568,7 @@ F:        include/uapi/linux/if_*
 F:     include/uapi/linux/netdevice.h
 
 NETXEN (1/10) GbE SUPPORT
+M:     Manish Chopra <manish.chopra@qlogic.com>
 M:     Sony Chacko <sony.chacko@qlogic.com>
 M:     Rajesh Borundia <rajesh.borundia@qlogic.com>
 L:     netdev@vger.kernel.org
@@ -5683,7 +5689,7 @@ S:        Maintained
 F:     arch/arm/*omap*/*clock*
 
 OMAP POWER MANAGEMENT SUPPORT
-M:     Kevin Hilman <khilman@ti.com>
+M:     Kevin Hilman <khilman@deeprootsystems.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     arch/arm/*omap*/*pm*
@@ -5777,7 +5783,7 @@ F:        arch/arm/*omap*/usb*
 
 OMAP GPIO DRIVER
 M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
-M:     Kevin Hilman <khilman@ti.com>
+M:     Kevin Hilman <khilman@deeprootsystems.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     drivers/gpio/gpio-omap.c
@@ -6209,7 +6215,7 @@ F:        include/linux/power_supply.h
 F:     drivers/power/
 
 PNP SUPPORT
-M:     Adam Belay <abelay@mit.edu>
+M:     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 M:     Bjorn Helgaas <bhelgaas@google.com>
 S:     Maintained
 F:     drivers/pnp/
@@ -6551,12 +6557,6 @@ S:       Maintained
 F:     Documentation/blockdev/ramdisk.txt
 F:     drivers/block/brd.c
 
-RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
-M:     Joshua Morris <josh.h.morris@us.ibm.com>
-M:     Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-S:     Maintained
-F:     drivers/block/rsxx/
-
 RANDOM NUMBER DRIVER
 M:     Theodore Ts'o" <tytso@mit.edu>
 S:     Maintained
@@ -7173,7 +7173,7 @@ F:        arch/arm/mach-s3c2410/bast-irq.c
 
 TI DAVINCI MACHINE SUPPORT
 M:     Sekhar Nori <nsekhar@ti.com>
-M:     Kevin Hilman <khilman@ti.com>
+M:     Kevin Hilman <khilman@deeprootsystems.com>
 L:     davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
 T:     git git://gitorious.org/linux-davinci/linux-davinci.git
 Q:     http://patchwork.kernel.org/project/linux-davinci/list/
@@ -7706,9 +7706,10 @@ F:       include/linux/swiotlb.h
 
 SYNOPSYS ARC ARCHITECTURE
 M:     Vineet Gupta <vgupta@synopsys.com>
-L:     linux-snps-arc@vger.kernel.org
 S:     Supported
 F:     arch/arc/
+F:     Documentation/devicetree/bindings/arc/
+F:     drivers/tty/serial/arc-uart.c
 
 SYSV FILESYSTEM
 M:     Christoph Hellwig <hch@infradead.org>
index 54d2b2a0fef0e2a1dba259b8f97ac0f7e8341225..58a165b02af1e27acb6d2635f3c9ab0d58c5cb00 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
index 31f77aec082381e34f5cb7ddb06added7d98d179..45b8e0cea1764d2b9e4a0b0d1f27b787eb845870 100644 (file)
@@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
        int i;
 
        for_each_sg(sg, s, nents, i)
-               sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+               s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
                                               s->length, dir);
 
        return nents;
index f4c8d36ebecbd2dcad76db454edc64686cfae292..a262828576839d9d48e5e2be18ae65b9fad815e5 100644 (file)
@@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
  */
 #define ELF_PLATFORM   (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif
index 23daa326fc9b4d19c5711af313e730dae2551f27..eb2ae53187d95fe1240885bd7026d1f13c3c98a4 100644 (file)
  *-------------------------------------------------------------*/
 .macro SAVE_ALL_EXCEPTION   marker
 
-       st      \marker, [sp, 8]
+       st      \marker, [sp, 8]        /* orig_r8 */
        st      r0, [sp, 4]    /* orig_r0, needed only for sys calls */
 
        /* Restore r9 used to code the early prologue */
index f3c4934f0ca9462e1876095fa197a90dc2e4fede..4930957ca3d38c4cb312aa60f21ffa9cbb413087 100644 (file)
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_KGDB
 
-#include <asm/user.h>
+#include <asm/ptrace.h>
 
 /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
  * register API yet */
@@ -53,9 +53,7 @@ enum arc700_linux_regnums {
 };
 
 #else
-static inline void kgdb_trap(struct pt_regs *regs, int param)
-{
-}
+#define kgdb_trap(regs, param)
 #endif
 
 #endif /* __ARC_KGDB_H__ */
index 8ae783d20a81185dab9ae04e61f5e2a9e47082b0..6179de7e07c21ca4875017adc1107c0e7b0b46c4 100644 (file)
@@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs)
 #define orig_r8_IS_SCALL               0x0001
 #define orig_r8_IS_SCALL_RESTARTED     0x0002
 #define orig_r8_IS_BRKPT               0x0004
-#define orig_r8_IS_EXCPN               0x0004
+#define orig_r8_IS_EXCPN               0x0008
 #define orig_r8_IS_IRQ1                        0x0010
 #define orig_r8_IS_IRQ2                        0x0020
 
index e53a5340ba4f2b91376f76ca7b4f5286567cd302..dd785befe7fd1eaa3b6b7d5df5ee8ef77e849587 100644 (file)
@@ -16,8 +16,6 @@
 #include <linux/types.h>
 
 int sys_clone_wrapper(int, int, int, int, int);
-int sys_fork_wrapper(void);
-int sys_vfork_wrapper(void);
 int sys_cacheflush(uint32_t, uint32_t uint32_t);
 int sys_arc_settls(void *);
 int sys_arc_gettls(void);
index 6afa4f70207529c5d9ae7d0caa69082108d154c9..30333cec0fef274365aeb559084d58a5ee023a9b 100644 (file)
 */
 struct user_regs_struct {
 
-       struct scratch {
+       struct {
                long pad;
                long bta, lp_start, lp_end, lp_count;
                long status32, ret, blink, fp, gp;
                long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
                long sp;
        } scratch;
-       struct callee {
+       struct {
                long pad;
                long r25, r24, r23, r22, r21, r20;
                long r19, r18, r17, r16, r15, r14, r13;
index ef6800ba2f03f6cce95a78e1db7b175f66d72e7c..91eeab81f52d3d66a408a215f06b4ff19022d364 100644 (file)
@@ -452,7 +452,7 @@ tracesys:
        ; using ERET won't work since next-PC has already committed
        lr  r12, [efa]
        GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r11
-       st  r12, [r11, THREAD_FAULT_ADDR]
+       st  r12, [r11, THREAD_FAULT_ADDR]       ; thread.fault_address
 
        ; PRE Sys Call Ptrace hook
        mov r0, sp                      ; pt_regs needed
@@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork
 
 ;################### Special Sys Call Wrappers ##########################
 
-; TBD: call do_fork directly from here
-ARC_ENTRY sys_fork_wrapper
-       SAVE_CALLEE_SAVED_USER
-       bl  @sys_fork
-       DISCARD_CALLEE_SAVED_USER
-
-       GET_CURR_THR_INFO_FLAGS   r10
-       btst r10, TIF_SYSCALL_TRACE
-       bnz  tracesys_exit
-
-       b ret_from_system_call
-ARC_EXIT sys_fork_wrapper
-
-ARC_ENTRY sys_vfork_wrapper
-       SAVE_CALLEE_SAVED_USER
-       bl  @sys_vfork
-       DISCARD_CALLEE_SAVED_USER
-
-       GET_CURR_THR_INFO_FLAGS   r10
-       btst r10, TIF_SYSCALL_TRACE
-       bnz  tracesys_exit
-
-       b ret_from_system_call
-ARC_EXIT sys_vfork_wrapper
-
 ARC_ENTRY sys_clone_wrapper
        SAVE_CALLEE_SAVED_USER
        bl  @sys_clone
index 2888ba5be47e4be4e796d0840abb9570253a416a..52bdc83c1495b5bf31a685998f7a5d7b9a1bd15e 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kgdb.h>
+#include <linux/sched.h>
 #include <asm/disasm.h>
 #include <asm/cacheflush.h>
 
index dc0f968dae0aecfa96db2bc005a99c74bdabb71a..2d95ac07df7bde15169ff5058d7e53afd2f2d73e 100644 (file)
@@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 
        n += scnprintf(buf + n, len - n, "\n");
 
-#ifdef _ASM_GENERIC_UNISTD_H
        n += scnprintf(buf + n, len - n,
-                      "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
-#endif
+                      "OS ABI [v3]\t: no-legacy-syscalls\n");
 
        return buf;
 }
index f6bdd07583f3a2ed190fdc1de9ceff9e77bf2ba8..9d6c1ca26af6d8bf4c035ae5f6085d8bfb6bc0be 100644 (file)
@@ -6,8 +6,6 @@
 #include <asm/syscalls.h>
 
 #define sys_clone      sys_clone_wrapper
-#define sys_fork       sys_fork_wrapper
-#define sys_vfork      sys_vfork_wrapper
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call) [nr] = (call),
index 13b739469c515cb93a8dff1cdf686583a3941c54..1cacda426a0ea6699528dd0eeedf83032825e09e 100644 (file)
@@ -1183,9 +1183,9 @@ config ARM_NR_BANKS
        default 8
 
 config IWMMXT
-       bool "Enable iWMMXt support"
+       bool "Enable iWMMXt support" if !CPU_PJ4
        depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
-       default y if PXA27x || PXA3xx || ARCH_MMP
+       default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
        help
          Enable support for iWMMXt context switching at run time if
          running on a CPU that supports it.
@@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420
         to deadlock. This workaround puts DSB before executing ISB if
         an abort may occur on cache maintenance.
 
+config ARM_ERRATA_798181
+       bool "ARM errata: TLBI/DSB failure on Cortex-A15"
+       depends on CPU_V7 && SMP
+       help
+         On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
+         adequately shooting down all use of the old entries. This
+         option enables the Linux kernel workaround for this erratum
+         which sends an IPI to the CPUs that are running the same ASID
+         as the one being invalidated.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
index ecfcdba2d17c5976c34d238b19fb39181b36883d..9b31f4311ea2717818d8a387d3e0131d335195d0 100644 (file)
@@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT
                                                DEBUG_IMX53_UART || \
                                                DEBUG_IMX6Q_UART
        default 1
+       depends on ARCH_MXC
        help
          Choose UART port on which kernel low-level debug messages
          should be output.
index dd0c57dd9f3096ae40b35e2b4e43fc3c983f3ea1..3234875824dcc35258bef8909d6bd0a283710765 100644 (file)
@@ -54,7 +54,7 @@
                };
 
                mvsdio@d00d4000 {
-                       pinctrl-0 = <&sdio_pins2>;
+                       pinctrl-0 = <&sdio_pins3>;
                        pinctrl-names = "default";
                        status = "okay";
                        /*
index 8188d138020edc57c88db0cb749b91bdf016a5cf..a195debb67d35297292b43c1a9e7fc45c4908a2c 100644 (file)
                                             "mpp50", "mpp51", "mpp52";
                              marvell,function = "sd0";
                        };
+
+                       sdio_pins3: sdio-pins3 {
+                             marvell,pins = "mpp48", "mpp49", "mpp50",
+                                            "mpp51", "mpp52", "mpp53";
+                             marvell,function = "sd0";
+                       };
                };
 
                gpio0: gpio@d0018100 {
index 9de93096601a2d1d526c37faf332a5510a4fc5d1..aaa63d0a80968b20abd4660492fb2af1ed463072 100644 (file)
 
                prcmu: prcmu@80157000 {
                        compatible = "stericsson,db8500-prcmu";
-                       reg = <0x80157000 0x1000>;
-                       reg-names = "prcmu";
+                       reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>;
+                       reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
                        interrupts = <0 47 0x4>;
                        #address-cells = <1>;
                        #size-cells = <1>;
index bd83b8fc7c83f01304397ac17e75be5a6438128e..c3573be7b92c18d1bb0b577f200c8d0bcc170351 100644 (file)
@@ -77,6 +77,7 @@
                };
 
                nand@3000000 {
+                       chip-delay = <40>;
                        status = "okay";
 
                        partition@0 {
index 8aad00f81ed9393118e64b38bfe96dc9aab56d28..f7bec3b1ba323538c7ef26d3f9b2aa5cd68b00cc 100644 (file)
@@ -13,6 +13,9 @@
        compatible = "marvell,orion5x";
        interrupt-parent = <&intc>;
 
+       aliases {
+               gpio0 = &gpio0;
+       };
        intc: interrupt-controller {
                compatible = "marvell,orion-intc", "marvell,intc";
                interrupt-controller;
@@ -32,7 +35,9 @@
                        #gpio-cells = <2>;
                        gpio-controller;
                        reg = <0x10100 0x40>;
-                       ngpio = <32>;
+                       ngpios = <32>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <6>, <7>, <8>, <9>;
                };
 
@@ -91,7 +96,7 @@
                        reg = <0x90000 0x10000>,
                              <0xf2200000 0x800>;
                        reg-names = "regs", "sram";
-                       interrupts = <22>;
+                       interrupts = <28>;
                        status = "okay";
                };
        };
index 48d00a099ce38d75d0df6ceb513078383c8d54e5..3d3f64d2111a33fb415979c2a1b911c2f7b37a66 100644 (file)
 
        spi@7000d800 {
                compatible = "nvidia,tegra20-slink";
-               reg = <0x7000d480 0x200>;
+               reg = <0x7000d800 0x200>;
                interrupts = <0 83 0x04>;
                nvidia,dma-request-selector = <&apbdma 17>;
                #address-cells = <1>;
index 9d87a3ffe9980a140585d063dbb9be5f2fbfa482..dbf46c27256255fd35ffaf6501a314f50668af3c 100644 (file)
 
        spi@7000d800 {
                compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
-               reg = <0x7000d480 0x200>;
+               reg = <0x7000d800 0x200>;
                interrupts = <0 83 0x04>;
                nvidia,dma-request-selector = <&apbdma 17>;
                #address-cells = <1>;
index 720799fd3a81195f02563ed5540d649adea14e05..dff714d886d58dbdce93a468e6a098cf45db98f6 100644 (file)
@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
        void (*delay)(unsigned long);
        void (*const_udelay)(unsigned long);
        void (*udelay)(unsigned long);
-       bool const_clock;
+       unsigned long ticks_per_jiffy;
 } arm_delay_ops;
 
 #define __delay(n)             arm_delay_ops.delay(n)
index 8c5e828f484dd7a039a2c5c9d060d6bba008c0ef..91b99abe7a95c114be0d3b628fb8b8d09f781c74 100644 (file)
@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
 #endif
 #endif
 
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
 #else
index 863a6611323c70077a9428b198a10d59758ff919..a7b85e0d0cc154a90a2efadca763cc60d95e82d8 100644 (file)
@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)       ({ atomic64_set(&mm->context.id, 0); 0; })
 
+DECLARE_PER_CPU(atomic64_t, active_asids);
+
 #else  /* !CONFIG_CPU_HAS_ASID */
 
 #ifdef CONFIG_MMU
index 4db8c8820f0d1c832bf9efd5e81a69d32b4fc7d7..9e9c041358ca8789e4a6798aaf871007795e8fb9 100644 (file)
@@ -450,6 +450,21 @@ static inline void local_flush_bp_all(void)
                isb();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+       /*
+        * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
+        */
+       asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+       dsb();
+}
+#else
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+}
+#endif
+
 /*
  *     flush_pmd_entry
  *
index 3248cde504ed9e3995d2f14357e9e1875b3f8129..fefd7f971437a084bedb5d4281fd6e78bac59d04 100644 (file)
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
  */
 
 .macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site.  Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad   #4)
        stmdb   sp!, {r0-r3, lr}
+ UNWIND(.save  {r0-r3, lr})
 .endm
 
 .macro mcount_get_lr reg
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
 .endm
 
 ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
 #ifdef CONFIG_DYNAMIC_FTRACE
        mov     ip, lr
        ldmia   sp!, {lr}
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
 #else
        __mcount
 #endif
+UNWIND(.fnend)
 ENDPROC(__gnu_mcount_nc)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller)
+UNWIND(.fnstart)
        __ftrace_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_caller)
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
        __ftrace_graph_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_graph_caller)
 #endif
 
index e0eb9a1cae774fc714548c3e11ad5141cc82190e..8bac553fe213def562dec9e30cad88c827d6239c 100644 (file)
@@ -267,7 +267,7 @@ __create_page_tables:
        addne   r6, r6, #1 << SECTION_SHIFT
        strne   r6, [r3]
 
-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
        sub     r4, r4, #4                      @ Fixup page table pointer
                                                @ for 64-bit descriptors
 #endif
index 96093b75ab90dc134d7232e3f6a1e3c5f41e7902..5dc1aa6f0f7d75e9339094a1da1b61eb018f32f3 100644 (file)
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
        }
 
        if (err) {
-               pr_warning("CPU %d debug is powered down!\n", cpu);
+               pr_warn_once("CPU %d debug is powered down!\n", cpu);
                cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
                return;
        }
@@ -987,7 +987,7 @@ clear_vcr:
        isb();
 
        if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-               pr_warning("CPU %d failed to disable vector catch\n", cpu);
+               pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
                return;
        }
 
@@ -1007,7 +1007,7 @@ clear_vcr:
        }
 
        if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-               pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+               pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
                return;
        }
 
index 3f6cbb2e3edae392f2b73520b89b6ee3b19e8437..d343a6c3a6d1f26ec10ca55f914785b65b4fa473 100644 (file)
@@ -353,6 +353,23 @@ void __init early_print(const char *str, ...)
        printk("%s", buf);
 }
 
+static void __init cpuid_init_hwcaps(void)
+{
+       unsigned int divide_instrs;
+
+       if (cpu_architecture() < CPU_ARCH_ARMv7)
+               return;
+
+       divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
+
+       switch (divide_instrs) {
+       case 2:
+               elf_hwcap |= HWCAP_IDIVA;
+       case 1:
+               elf_hwcap |= HWCAP_IDIVT;
+       }
+}
+
 static void __init feat_v6_fixup(void)
 {
        int id = read_cpuid_id();
@@ -483,8 +500,11 @@ static void __init setup_processor(void)
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
+
+       cpuid_init_hwcaps();
+
 #ifndef CONFIG_ARM_THUMB
-       elf_hwcap &= ~HWCAP_THUMB;
+       elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
        feat_v6_fixup();
@@ -524,7 +544,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);
 
-#ifndef CONFIG_LPAE
+#ifndef CONFIG_ARM_LPAE
        if (bank->start + size < bank->start) {
                printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
                        "32-bit physical address space\n", (long long)start);
index 79078edbb9bc12d38bb144a88754b83390429c95..1f2ccccaf009751a0ed830c6a91bfa4ea60278e3 100644 (file)
@@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;
 
-       if (arm_delay_ops.const_clock)
-               return NOTIFY_OK;
-
        if (!per_cpu(l_p_j_ref, cpu)) {
                per_cpu(l_p_j_ref, cpu) =
                        per_cpu(cpu_data, cpu).loops_per_jiffy;
index bd0300531399e5eeb066a45f3c29b0d897c23b42..e82e1d24877227ba65ab716dc6c87e110617bd79 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 /**********************************************************************/
 
@@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored)
        local_flush_bp_all();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static int erratum_a15_798181(void)
+{
+       unsigned int midr = read_cpuid_id();
+
+       /* Cortex-A15 r0p0..r3p2 affected */
+       if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+               return 0;
+       return 1;
+}
+#else
+static int erratum_a15_798181(void)
+{
+       return 0;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+       dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+       if (!erratum_a15_798181())
+               return;
+
+       dummy_flush_tlb_a15_erratum();
+       smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
+                              NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+       int cpu;
+       cpumask_t mask = { CPU_BITS_NONE };
+
+       if (!erratum_a15_798181())
+               return;
+
+       dummy_flush_tlb_a15_erratum();
+       for_each_online_cpu(cpu) {
+               if (cpu == smp_processor_id())
+                       continue;
+               /*
+                * We only need to send an IPI if the other CPUs are running
+                * the same ASID as the one being invalidated. There is no
+                * need for locking around the active_asids check since the
+                * switch_mm() function has at least one dmb() (as required by
+                * this workaround) in case a context switch happens on
+                * another CPU after the condition below.
+                */
+               if (atomic64_read(&mm->context.id) ==
+                   atomic64_read(&per_cpu(active_asids, cpu)))
+                       cpumask_set_cpu(cpu, &mask);
+       }
+       smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
 void flush_tlb_all(void)
 {
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
+       broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
                on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
                local_flush_tlb_mm(mm);
+       broadcast_tlb_mm_a15_erratum(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                                        &ta, 1);
        } else
                local_flush_tlb_page(vma, uaddr);
+       broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                local_flush_tlb_kernel_page(kaddr);
+       broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
                                        &ta, 1);
        } else
                local_flush_tlb_range(vma, start, end);
+       broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
+       broadcast_tlb_a15_erratum();
 }
 
 void flush_bp_all(void)
index c9a17316e9fe75f2a8dec4fe3893816d3d0730c9..0e4cfe123b385339629499ae5dacb4f6f5e08ff6 100644 (file)
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
                          lr, irq, vgic_cpu->vgic_lr[lr]);
                BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
                vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-
-               goto out;
+               return true;
        }
 
        /* Try to use another LR for this interrupt */
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
        vgic_cpu->vgic_irq_lr_map[irq] = lr;
        set_bit(lr, vgic_cpu->lr_used);
 
-out:
        if (!vgic_irq_is_edge(vcpu, irq))
                vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
 
@@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 
        kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
 
-       /*
-        * We do not need to take the distributor lock here, since the only
-        * action we perform is clearing the irq_active_bit for an EOIed
-        * level interrupt.  There is a potential race with
-        * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
-        * check if the interrupt is already active. Two possibilities:
-        *
-        * - The queuing is occurring on the same vcpu: cannot happen,
-        *   as we're already in the context of this vcpu, and
-        *   executing the handler
-        * - The interrupt has been migrated to another vcpu, and we
-        *   ignore this interrupt for this run. Big deal. It is still
-        *   pending though, and will get considered when this vcpu
-        *   exits.
-        */
        if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
                /*
                 * Some level interrupts have been EOIed. Clear their
@@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
                        } else {
                                vgic_cpu_irq_clear(vcpu, irq);
                        }
+
+                       /*
+                        * Despite being EOIed, the LR may not have
+                        * been marked as empty.
+                        */
+                       set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+                       vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
                }
        }
 
@@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Sync back the VGIC state after a guest run. We do not really touch
- * the distributor here (the irq_pending_on_cpu bit is safe to set),
- * so there is no need for taking its lock.
+ * Sync back the VGIC state after a guest run. The distributor lock is
+ * needed so we don't get preempted in the middle of the state processing.
  */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
@@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
        if (!irqchip_in_kernel(vcpu->kvm))
                return;
 
+       spin_lock(&dist->lock);
        __kvm_vgic_sync_hwstate(vcpu);
+       spin_unlock(&dist->lock);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
index 6b93f6a1a3c7413982d455e674c0a12ba5f70186..64dbfa57204ae9c3fde48c6efaec7f60e3c6af89 100644 (file)
@@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles)
 static void __timer_const_udelay(unsigned long xloops)
 {
        unsigned long long loops = xloops;
-       loops *= loops_per_jiffy;
+       loops *= arm_delay_ops.ticks_per_jiffy;
        __timer_delay(loops >> UDELAY_SHIFT);
 }
 
@@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
                pr_info("Switching to timer-based delay loop\n");
                delay_timer                     = timer;
                lpj_fine                        = timer->freq / HZ;
-               loops_per_jiffy                 = lpj_fine;
+
+               /* cpufreq may scale loops_per_jiffy, so keep a private copy */
+               arm_delay_ops.ticks_per_jiffy   = lpj_fine;
                arm_delay_ops.delay             = __timer_delay;
                arm_delay_ops.const_udelay      = __timer_const_udelay;
                arm_delay_ops.udelay            = __timer_udelay;
-               arm_delay_ops.const_clock       = true;
+
                delay_calibrated                = true;
        } else {
                pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
index e698f26cc0cb5ed6bd3a62b7effb39b70d619376..52e4bb5cf12df3d58295c5120ee7fbb0c2e57fd9 100644 (file)
 
 static struct map_desc cns3xxx_io_desc[] __initdata = {
        {
-               .virtual        = CNS3XXX_TC11MP_TWD_BASE_VIRT,
-               .pfn            = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
-       }, {
-               .virtual        = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
-               .pfn            = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
-       }, {
-               .virtual        = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
-               .pfn            = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
-               .length         = SZ_4K,
+               .virtual        = CNS3XXX_TC11MP_SCU_BASE_VIRT,
+               .pfn            = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE),
+               .length         = SZ_8K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = CNS3XXX_TIMER1_2_3_BASE_VIRT,
index 191c8e57f2890f09c30610bce584194bc1c3d7f1..b1021aafa4810663a202dc2d1837fcae98c3c2de 100644 (file)
 #define RTC_INTR_STS_OFFSET                    0x34
 
 #define CNS3XXX_MISC_BASE                      0x76000000      /* Misc Control */
-#define CNS3XXX_MISC_BASE_VIRT                 0xFFF07000      /* Misc Control */
+#define CNS3XXX_MISC_BASE_VIRT                 0xFB000000      /* Misc Control */
 
 #define CNS3XXX_PM_BASE                                0x77000000      /* Power Management Control */
-#define CNS3XXX_PM_BASE_VIRT                   0xFFF08000
+#define CNS3XXX_PM_BASE_VIRT                   0xFB001000
 
 #define PM_CLK_GATE_OFFSET                     0x00
 #define PM_SOFT_RST_OFFSET                     0x04
 #define PM_PLL_HM_PD_OFFSET                    0x1C
 
 #define CNS3XXX_UART0_BASE                     0x78000000      /* UART 0 */
-#define CNS3XXX_UART0_BASE_VIRT                        0xFFF09000
+#define CNS3XXX_UART0_BASE_VIRT                        0xFB002000
 
 #define CNS3XXX_UART1_BASE                     0x78400000      /* UART 1 */
 #define CNS3XXX_UART1_BASE_VIRT                        0xFFF0A000
 #define CNS3XXX_I2S_BASE_VIRT                  0xFFF10000
 
 #define CNS3XXX_TIMER1_2_3_BASE                        0x7C800000      /* Timer */
-#define CNS3XXX_TIMER1_2_3_BASE_VIRT           0xFFF10800
+#define CNS3XXX_TIMER1_2_3_BASE_VIRT           0xFB003000
 
 #define TIMER1_COUNTER_OFFSET                  0x00
 #define TIMER1_AUTO_RELOAD_OFFSET              0x04
  * Testchip peripheral and fpga gic regions
  */
 #define CNS3XXX_TC11MP_SCU_BASE                        0x90000000      /* IRQ, Test chip */
-#define CNS3XXX_TC11MP_SCU_BASE_VIRT           0xFF000000
+#define CNS3XXX_TC11MP_SCU_BASE_VIRT           0xFB004000
 
 #define CNS3XXX_TC11MP_GIC_CPU_BASE            0x90000100      /* Test chip interrupt controller CPU interface */
-#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT       0xFF000100
+#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT       (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100)
 
 #define CNS3XXX_TC11MP_TWD_BASE                        0x90000600
-#define CNS3XXX_TC11MP_TWD_BASE_VIRT           0xFF000600
+#define CNS3XXX_TC11MP_TWD_BASE_VIRT           (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600)
 
 #define CNS3XXX_TC11MP_GIC_DIST_BASE           0x90001000      /* Test chip interrupt controller distributor */
-#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT      0xFF001000
+#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT      (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000)
 
 #define CNS3XXX_TC11MP_L220_BASE               0x92002000      /* L220 registers */
 #define CNS3XXX_TC11MP_L220_BASE_VIRT          0xFF002000
index d2afb4dd82aba7902049e24e72215e432eed6bd2..b5cc77d2380bd59c24c2da80a9995d45c2277f85 100644 (file)
@@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
 
 static inline void putc(int c)
 {
-       /* Transmit fifo not full?  */
-       while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)
-               ;
+       int i;
+
+       for (i = 0; i < 10000; i++) {
+               /* Transmit fifo not full? */
+               if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF))
+                       break;
+       }
 
        __raw_writeb(c, PHYS_UART_DATA);
 }
index 5a800bfcec5b9e67bbf20b6b7001a70867c28482..5bf4a97ab2413c3a7234a092a74350423cba359b 100644 (file)
@@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *);
 
 extern void imx_enable_cpu(int cpu, bool enable);
 extern void imx_set_cpu_jump(int cpu, void *jump_addr);
+extern u32 imx_get_cpu_arg(int cpu);
+extern void imx_set_cpu_arg(int cpu, u32 arg);
 extern void v7_cpu_resume(void);
 extern u32 *pl310_get_save_ptr(void);
 #ifdef CONFIG_SMP
index 7bc5fe15dda2a3cbc5eb43bd1c93d69830c4af70..361a253e2b63c1abbce953df6745c1fdac8812d7 100644 (file)
@@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void)
 void imx_cpu_die(unsigned int cpu)
 {
        cpu_enter_lowpower();
+       /*
+        * We use the cpu jumping argument register to sync with
+        * imx_cpu_kill(), which runs on cpu0 and waits for the
+        * register to be cleared before it kills the cpu.
+        */
+       imx_set_cpu_arg(cpu, ~0);
        cpu_do_idle();
 }
 
 int imx_cpu_kill(unsigned int cpu)
 {
+       unsigned long timeout = jiffies + msecs_to_jiffies(50);
+
+       while (imx_get_cpu_arg(cpu) == 0)
+               if (time_after(jiffies, timeout))
+                       return 0;
        imx_enable_cpu(cpu, false);
+       imx_set_cpu_arg(cpu, 0);
        return 1;
 }
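
The hunk above is a two-CPU handshake: the dying CPU parks a non-zero token in its SRC argument register, and imx_cpu_kill() on cpu0 polls that token under a 50 ms jiffies deadline before cutting power. A minimal userspace sketch of the same bounded-poll idiom, with hypothetical names and a plain C variable standing in for the i.MX register:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static volatile unsigned int token;     /* stands in for the SRC GPR word */

/* Poll the token, but never wait longer than timeout_ms: the userspace
 * analogue of the jiffies/time_after() loop in imx_cpu_kill() above. */
static bool wait_for_token(long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (token == 0) {
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000L +
                    (now.tv_nsec - start.tv_nsec) / 1000000L > timeout_ms)
                        return false;   /* timed out, like "return 0" above */
        }
        return true;
}

int main(void)
{
        printf("no token yet: %d\n", wait_for_token(10)); /* prints 0 */
        token = ~0u;                                      /* peer "sets" it */
        printf("token set:    %d\n", wait_for_token(10)); /* prints 1 */
        return 0;
}

Returning false on timeout mirrors imx_cpu_kill() returning 0, so the caller can report the failure instead of spinning forever on a core that never reached imx_cpu_die().
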
index e15f1555c59b1ebd2712ba8b8c31772c340b06ef..09a742f8c7aba31be0155076699eb192f4aa251e 100644 (file)
@@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr)
                       src_base + SRC_GPR1 + cpu * 8);
 }
 
+u32 imx_get_cpu_arg(int cpu)
+{
+       cpu = cpu_logical_map(cpu);
+       return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
+}
+
+void imx_set_cpu_arg(int cpu, u32 arg)
+{
+       cpu = cpu_logical_map(cpu);
+       writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
+}
+
 void imx_src_prepare_restart(void)
 {
        u32 val;
index 1c6e736cbbf8b64e580f511c4a3f0e6f3a239d0a..08dd739aa70918fe9fdf1c06aeca7eda265ca07f 100644 (file)
@@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = {
 
 static struct mvsdio_platform_data guruplug_mvsdio_data = {
        /* unfortunately the CD signal has not been connected */
+       .gpio_card_detect = -1,
+       .gpio_write_protect = -1,
 };
 
 static struct gpio_led guruplug_led_pins[] = {
index 8ddd69fdc9374ebe20c8ab7b1842593164410c05..6a6eb548307d10762b0f56307b81938842d180aa 100644 (file)
@@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = {
 
 static struct mvsdio_platform_data openrd_mvsdio_data = {
        .gpio_card_detect = 29, /* MPP29 used as SD card detect */
+       .gpio_write_protect = -1,
 };
 
 static unsigned int openrd_mpp_config[] __initdata = {
index c7d93b48926bbae7d7013190e00ae401e552549b..d24223166e06c60e5d9631d6f8818f44bfb0fc5b 100644 (file)
@@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = {
 
 static struct mvsdio_platform_data rd88f6281_mvsdio_data = {
        .gpio_card_detect = 28,
+       .gpio_write_protect = -1,
 };
 
 static unsigned int rd88f6281_mpp_config[] __initdata = {
index 2969027f02fa57045ec7bb3d65d7a98927cfde3c..f9fd77e8f1f5a998af7b8f15e12af3afd5bcc299 100644 (file)
@@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles,
 {
        u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
 
-       writel_relaxed(0, event_base + TIMER_CLEAR);
+       ctrl &= ~TIMER_ENABLE_EN;
+       writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+
+       writel_relaxed(ctrl, event_base + TIMER_CLEAR);
        writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
        writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
        return 0;
index 274ff58271de149f168a1a901ccc435ea131f690..6a9195e10579a8263b3446ccc443bd1d1381f784 100644 (file)
@@ -44,6 +44,8 @@
 
 #define ARMADA_370_XP_MAX_PER_CPU_IRQS         (28)
 
+#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ       (5)
+
 #define ACTIVE_DOORBELLS                       (8)
 
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
@@ -62,7 +64,7 @@ static void armada_370_xp_irq_mask(struct irq_data *d)
 #ifdef CONFIG_SMP
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+       if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
                writel(hwirq, main_int_base +
                                ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
        else
@@ -79,7 +81,7 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
 #ifdef CONFIG_SMP
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+       if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
                writel(hwirq, main_int_base +
                                ARMADA_370_XP_INT_SET_ENABLE_OFFS);
        else
@@ -147,7 +149,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
        writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
        irq_set_status_flags(virq, IRQ_LEVEL);
 
-       if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) {
+       if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
                irq_set_percpu_devid(virq);
                irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
                                        handle_percpu_devid_irq);
index 3218f1f2c0e05dfe3b067324d1fecefadb86ae29..e7b781d3788f2c9a9e896ef75b502f789fd15d64 100644 (file)
@@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
                .lower_margin   = 4,
                .hsync_len      = 1,
                .vsync_len      = 1,
-               .sync           = FB_SYNC_DATA_ENABLE_HIGH_ACT |
-                                 FB_SYNC_DOTCLK_FAILING_ACT,
        },
 };
 
@@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
                .lower_margin   = 10,
                .hsync_len      = 10,
                .vsync_len      = 10,
-               .sync           = FB_SYNC_DATA_ENABLE_HIGH_ACT |
-                                 FB_SYNC_DOTCLK_FAILING_ACT,
        },
 };
 
@@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
                .lower_margin   = 45,
                .hsync_len      = 1,
                .vsync_len      = 1,
-               .sync           = FB_SYNC_DATA_ENABLE_HIGH_ACT,
        },
 };
 
@@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
                .lower_margin   = 13,
                .hsync_len      = 48,
                .vsync_len      = 3,
-               .sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
-                                 FB_SYNC_DATA_ENABLE_HIGH_ACT |
-                                 FB_SYNC_DOTCLK_FAILING_ACT,
+               .sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
        },
 };
 
@@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
                .lower_margin = 0x15,
                .hsync_len = 64,
                .vsync_len = 4,
-               .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
-                               FB_SYNC_DATA_ENABLE_HIGH_ACT |
-                               FB_SYNC_DOTCLK_FAILING_ACT,
+               .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
        },
 };
 
@@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
                .lower_margin   = 2,
                .hsync_len      = 15,
                .vsync_len      = 15,
-               .sync           = FB_SYNC_DATA_ENABLE_HIGH_ACT
        },
 };
 
@@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
        mxsfb_pdata.default_bpp = 32;
        mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+                               MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 static inline void enable_clk_enet_out(void)
@@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
        mxsfb_pdata.default_bpp = 32;
        mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+                               MXSFB_SYNC_DOTCLK_FAILING_ACT;
 
        mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
 }
@@ -297,6 +291,7 @@ static void __init m28evk_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
        mxsfb_pdata.default_bpp = 16;
        mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
 }
 
 static void __init sc_sps1_init(void)
@@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
        mxsfb_pdata.default_bpp = 32;
        mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+                               MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 #define ENET0_MDC__GPIO_4_0    MXS_GPIO_NR(4, 0)
@@ -407,6 +404,7 @@ static void __init cfa10049_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
        mxsfb_pdata.default_bpp = 32;
        mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
 }
 
 static void __init cfa10037_init(void)
@@ -423,6 +421,8 @@ static void __init apf28_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
        mxsfb_pdata.default_bpp = 16;
        mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+                               MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 static void __init mxs_machine_init(void)
index cb7c6ae2e3fc66f4a0744bf7930e7e1cd635ee87..6c4f766365a2e31d9b0364ef6460dca7e79c756c 100644 (file)
@@ -538,15 +538,6 @@ static struct clk usb_hhc_ck16xx = {
 };
 
 static struct clk usb_dc_ck = {
-       .name           = "usb_dc_ck",
-       .ops            = &clkops_generic,
-       /* Direct from ULPD, no parent */
-       .rate           = 48000000,
-       .enable_reg     = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
-       .enable_bit     = USB_REQ_EN_SHIFT,
-};
-
-static struct clk usb_dc_ck7xx = {
        .name           = "usb_dc_ck",
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent */
@@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = {
        CLK(NULL,       "usb_clko",     &usb_clko,      CK_16XX | CK_1510 | CK_310),
        CLK(NULL,       "usb_hhc_ck",   &usb_hhc_ck1510, CK_1510 | CK_310),
        CLK(NULL,       "usb_hhc_ck",   &usb_hhc_ck16xx, CK_16XX),
-       CLK(NULL,       "usb_dc_ck",    &usb_dc_ck,     CK_16XX),
-       CLK(NULL,       "usb_dc_ck",    &usb_dc_ck7xx,  CK_7XX),
+       CLK(NULL,       "usb_dc_ck",    &usb_dc_ck,     CK_16XX | CK_7XX),
        CLK(NULL,       "mclk",         &mclk_1510,     CK_1510 | CK_310),
        CLK(NULL,       "mclk",         &mclk_16xx,     CK_16XX),
        CLK(NULL,       "bclk",         &bclk_1510,     CK_1510 | CK_310),
index a3e0aaa4886b9c4954160d585115a219d1ba3a95..1322ed707b302deec3580298f9104c6b36c0c451 100644 (file)
@@ -38,7 +38,7 @@
 #include "gpmc-smc91x.h"
 
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
+#include <video/omap-panel-data.h>
 
 #include "mux.h"
 #include "hsmmc.h"
index ce812decfacad108eecdb20174f7c030ffcf5cc5..2612eeaa58896961336436eac379e43e3ccdffe0 100644 (file)
@@ -35,7 +35,7 @@
 #include "common.h"
 #include <linux/omap-dma.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include "gpmc.h"
 #include "gpmc-smc91x.h"
index 9fb85908a61e498a7ae7ec27f18fe2f7b45756db..1d6c28872505607e705beeb434a3fca466b23a8c 100644 (file)
@@ -35,8 +35,7 @@
 
 #include "common.h"
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include "am35xx-emac.h"
 #include "mux.h"
index af2bb219e2147d922c95f829e833804f421184b3..bccd3e51fecb6a328a690b623cfabbb8754ee3a6 100644 (file)
@@ -41,8 +41,7 @@
 
 #include <linux/platform_data/mtd-nand-omap2.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 #include <linux/platform_data/spi-omap2-mcspi.h>
 
 #include "common.h"
index 53056c3b08362ea6a4c40f8a044f2b146018a1d8..12d2126a2382d432fd02ce73dbc3d69b42e75b93 100644 (file)
@@ -43,8 +43,7 @@
 #include "gpmc.h"
 #include <linux/platform_data/mtd-nand-omap2.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include <linux/platform_data/spi-omap2-mcspi.h>
 #include <linux/input/matrix_keypad.h>
index 812c829fa46f60a3af0aeec99bcb45555500bb05..0c1bdd7ee32d7d3817b10c46c28b8fd7e072aa49 100644 (file)
@@ -34,7 +34,7 @@
 #include <asm/mach/map.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
+#include <video/omap-panel-data.h>
 
 #include "common.h"
 #include "mux.h"
index bf92678a01d061dabd0d857dfb00d83504ce034a..e979d48270c91136d7809d43251e0d0df98d8170 100644 (file)
@@ -31,7 +31,7 @@
 #include <asm/mach/arch.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 #include <linux/platform_data/mtd-onenand-omap2.h>
 
 #include "common.h"
index b12fe966a7b9efc4d64ecc4cc368fa0b3ec64234..8a8e505a0e90ff8837822ae49c5384fac8c0121e 100644 (file)
@@ -41,7 +41,7 @@
 #include "gpmc-smsc911x.h"
 
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
+#include <video/omap-panel-data.h>
 
 #include "board-flash.h"
 #include "mux.h"
index c3558f93d42c4a3d52f705573be61e156ff59669..0ce91af753fa85bfaa83c43bf625b90fef3809d9 100644 (file)
@@ -43,7 +43,7 @@
 #include <asm/mach/flash.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 #include <linux/platform_data/mtd-nand-omap2.h>
 
 #include "common.h"
index 48789e0bb915059ff8b9fb26d5a2712ff095020b..233a0d528fcf7c5ed93c000a1cad35748ce6fb21 100644 (file)
@@ -51,7 +51,7 @@
 #include "common.h"
 #include <linux/platform_data/spi-omap2-mcspi.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include "soc.h"
 #include "mux.h"
index 95c10b3aa678a5321be115c91a353a8296405168..495b989f9040bc9fca749e6842205cb21c447e5e 100644 (file)
@@ -44,8 +44,7 @@
 #include "gpmc.h"
 #include <linux/platform_data/mtd-nand-omap2.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include <linux/platform_data/spi-omap2-mcspi.h>
 
index 86bab51154eefe66ba1808f38fa85cef0ceb41e6..630833235cbc45eb4e192ccb498f378c729ef8a8 100644 (file)
@@ -47,8 +47,7 @@
 #include <asm/mach/map.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-generic-dpi.h>
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 #include "common.h"
 #include "mux.h"
index 3d58f335f173fe4c3a9542590d43a98dbebedefb..0c6834ae1fc43a8ab20409dccd1bb24d7eeb20fd 100644 (file)
  */
 #define OMAP4_DPLL_ABE_DEFFREQ                         98304000
 
+/*
+ * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
+ * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
+ * locked frequency for the USB DPLL is 960MHz.
+ */
+#define OMAP4_DPLL_USB_DEFFREQ                         960000000
+
 /* Root clocks */
 
 DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
@@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,
                    OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,
                    hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);
 
+DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0,
+               OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
+               OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL);
+
 DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,
                OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
                OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
@@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = {
        CLK(NULL,       "per_mcbsp4_gfclk",                     &per_mcbsp4_gfclk,      CK_443X),
        CLK(NULL,       "hsmmc1_fclk",                  &hsmmc1_fclk,   CK_443X),
        CLK(NULL,       "hsmmc2_fclk",                  &hsmmc2_fclk,   CK_443X),
+       CLK(NULL,       "ocp2scp_usb_phy_phy_48m",      &ocp2scp_usb_phy_phy_48m,       CK_443X),
        CLK(NULL,       "sha2md5_fck",                  &sha2md5_fck,   CK_443X),
        CLK(NULL,       "slimbus1_fclk_1",              &slimbus1_fclk_1,       CK_443X),
        CLK(NULL,       "slimbus1_fclk_0",              &slimbus1_fclk_0,       CK_443X),
@@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void)
        if (rc)
                pr_err("%s: failed to configure ABE DPLL!\n", __func__);
 
+       /*
+        * Lock USB DPLL on OMAP4 devices so that the L3INIT power
+        * domain can transition to retention state when not in use.
+        */
+       rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
        return 0;
 }
index 40f4a03d728fc9a7278b722ce809bd57f47c64f4..d6ba13e1c540f6ecc186a164f441ca48b2540a1e 100644 (file)
@@ -293,5 +293,8 @@ extern void omap_reserve(void);
 struct omap_hwmod;
 extern int omap_dss_reset(struct omap_hwmod *);
 
+/* SoC specific clock initializer */
+extern int (*omap_clk_init)(void);
+
 #endif /* __ASSEMBLER__ */
 #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
index 4be5cfc81ab8cd8c0ab1b6333b13c33fe6d0d733..9c49bbe825f79416d3ce77eaa950e6676b6a5e50 100644 (file)
@@ -27,9 +27,7 @@
 #include <linux/gpio.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-tfp410.h>
-#include <video/omap-panel-nokia-dsi.h>
-#include <video/omap-panel-picodlp.h>
+#include <video/omap-panel-data.h>
 
 #include "soc.h"
 #include "dss-common.h"
index 2c3fdd65387b56c542d6d97cbc093fcf69a51b9a..5c445ca1e271ca9f14c9e4fd008035ee449ad9b4 100644 (file)
 #include "prm3xxx.h"
 #include "prm44xx.h"
 
+/*
+ * omap_clk_init: points to a function that does the SoC-specific
+ * clock initializations
+ */
+int (*omap_clk_init)(void);
+
 /*
  * The machine specific code may provide the extra mapping besides the
  * default mapping provided here.
@@ -397,7 +403,7 @@ void __init omap2420_init_early(void)
        omap242x_clockdomains_init();
        omap2420_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap2420_clk_init();
+       omap_clk_init = omap2420_clk_init;
 }
 
 void __init omap2420_init_late(void)
@@ -427,7 +433,7 @@ void __init omap2430_init_early(void)
        omap243x_clockdomains_init();
        omap2430_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap2430_clk_init();
+       omap_clk_init = omap2430_clk_init;
 }
 
 void __init omap2430_init_late(void)
@@ -462,7 +468,7 @@ void __init omap3_init_early(void)
        omap3xxx_clockdomains_init();
        omap3xxx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap3xxx_clk_init();
+       omap_clk_init = omap3xxx_clk_init;
 }
 
 void __init omap3430_init_early(void)
@@ -500,7 +506,7 @@ void __init ti81xx_init_early(void)
        omap3xxx_clockdomains_init();
        omap3xxx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap3xxx_clk_init();
+       omap_clk_init = omap3xxx_clk_init;
 }
 
 void __init omap3_init_late(void)
@@ -568,7 +574,7 @@ void __init am33xx_init_early(void)
        am33xx_clockdomains_init();
        am33xx_hwmod_init();
        omap_hwmod_init_postsetup();
-       am33xx_clk_init();
+       omap_clk_init = am33xx_clk_init;
 }
 #endif
 
@@ -593,7 +599,7 @@ void __init omap4430_init_early(void)
        omap44xx_clockdomains_init();
        omap44xx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap4xxx_clk_init();
+       omap_clk_init = omap4xxx_clk_init;
 }
 
 void __init omap4430_init_late(void)
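
The io.c hunks above stop running each SoC's clock initializer directly from its *_init_early() hook and instead record it in the omap_clk_init pointer; the timer changes later in this diff call it just before the clockevents are set up. A small standalone sketch of that record-now, run-later pattern, with hypothetical names rather than the OMAP code:

#include <stdio.h>

/* Recorded during early init, invoked later once its consumer is ready. */
static int (*soc_clk_init)(void);

static int my_soc_clk_init(void)
{
        printf("clocks initialized\n");
        return 0;
}

static void init_early(void)
{
        soc_clk_init = my_soc_clk_init;         /* record, do not run yet */
}

static void timer_init(void)
{
        if (soc_clk_init)                       /* run at the last safe moment */
                soc_clk_init();
        printf("timers initialized\n");
}

int main(void)
{
        init_early();
        timer_init();
        return 0;
}
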
index c2c798c08c2b9f63a55bc11be5f654f80006431c..a202a47851045c8be6d947ce6407574d7a824f27 100644 (file)
@@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh)
        }
 
        if (sf & SYSC_HAS_MIDLEMODE) {
-               if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+               if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
                        idlemode = HWMOD_IDLEMODE_NO;
                } else {
                        if (sf & SYSC_HAS_ENAWAKEUP)
@@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh)
        }
 
        if (sf & SYSC_HAS_MIDLEMODE) {
-               if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+               if ((oh->flags & HWMOD_SWSUP_MSTANDBY) ||
+                   (oh->flags & HWMOD_FORCE_MSTANDBY)) {
                        idlemode = HWMOD_IDLEMODE_FORCE;
                } else {
                        if (sf & SYSC_HAS_ENAWAKEUP)
index d43d9b608edab3c1b7e69c96e711ec1eefc2d7c8..d5dc935f6060446aee42a250eddbe6140e9aa42a 100644 (file)
@@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm {
  *
  * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out
  *     of idle, rather than relying on module smart-idle
- * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out
- *     of standby, rather than relying on module smart-standby
+ * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and
+ *     out of standby, rather than relying on module smart-standby
  * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
  *     SDRAM controller, etc. XXX probably belongs outside the main hwmod file
  *     XXX Should be HWMOD_SETUP_NO_RESET
@@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm {
  *     correctly, or this is being abused to deal with some PM latency
  *     issues -- but we're currently suffering from a shortage of
  *     folks who are able to track these issues down properly.
+ * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that the device
+ *     is kept in force-standby mode. Failing to do so causes PM problems
+ *     with musb on OMAP3630 at least. Note that musb has a dedicated register
+ *     to control the MSTANDBY signal when MIDLEMODE is set to force-standby.
  */
 #define HWMOD_SWSUP_SIDLE                      (1 << 0)
 #define HWMOD_SWSUP_MSTANDBY                   (1 << 1)
@@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_16BIT_REG                                (1 << 8)
 #define HWMOD_EXT_OPT_MAIN_CLK                 (1 << 9)
 #define HWMOD_BLOCK_WFI                                (1 << 10)
+#define HWMOD_FORCE_MSTANDBY                   (1 << 11)
 
 /*
  * omap_hwmod._int_flags definitions
index ac7e03ec952f22f7c856084c15e2de3e767e4273..5112d04e7b79bd928a3d332f2ec846352e0e5b1b 100644 (file)
@@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
         * Erratum ID: i479  idle_req / idle_ack mechanism potentially
         * broken when autoidle is enabled
         * workaround is to disable the autoidle bit at module level.
+        *
+        * Enabling the device in any MIDLEMODE setting other than force-idle
+        * causes core_pwrdm to not enter idle states, at least on OMAP3630.
+        * Note that musb has an OTG_FORCESTDBY register that controls the
+        * MSTANDBY signal when MIDLEMODE is set to force-idle.
         */
        .flags          = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
-                               | HWMOD_SWSUP_MSTANDBY,
+                               | HWMOD_FORCE_MSTANDBY,
 };
 
 /* usb_otg_hs */
index 0e47d2e1687c76004ea7449b8e496340617b09d1..9e0576569e07ae2e19492b5877de7aafe3ef567c 100644 (file)
@@ -2714,6 +2714,10 @@ static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
        { }
 };
 
+static struct omap_hwmod_opt_clk ocp2scp_usb_phy_opt_clks[] = {
+       { .role = "48mhz", .clk = "ocp2scp_usb_phy_phy_48m" },
+};
+
 /* ocp2scp_usb_phy */
 static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
        .name           = "ocp2scp_usb_phy",
@@ -2728,6 +2732,8 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
                },
        },
        .dev_attr       = ocp2scp_dev_attr,
+       .opt_clks       = ocp2scp_usb_phy_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(ocp2scp_usb_phy_opt_clks),
 };
 
 /*
index 2bdd4cf17a8fd0e80b6c2c74feb66fcc9018ecca..f62b509ed08de75e6f4191bc8cf43dc130c7cb2d 100644 (file)
@@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void)
                               clksrc_nr, clksrc_src)                   \
 void __init omap##name##_gptimer_timer_init(void)                      \
 {                                                                      \
+       if (omap_clk_init)                                              \
+               omap_clk_init();                                        \
        omap_dmtimer_init();                                            \
        omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);    \
        omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src);        \
@@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void)                   \
                                clksrc_nr, clksrc_src)                  \
 void __init omap##name##_sync32k_timer_init(void)              \
 {                                                                      \
+       if (omap_clk_init)                                              \
+               omap_clk_init();                                        \
        omap_dmtimer_init();                                            \
        omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);    \
        /* Enable the use of clocksource="gp_timer" kernel parameter */ \
index 051b62c2710208537a3442a0ec03af57c11cc530..7f2cb6c5e2c10de3b6479e8a23ced9c523329b55 100644 (file)
@@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
 #endif
 
 struct mmci_platform_data mop500_sdi0_data = {
-       .ios_handler    = mop500_sdi0_ios_handler,
        .ocr_mask       = MMC_VDD_29_30,
        .f_max          = 50000000,
        .capabilities   = MMC_CAP_4_BIT_DATA |
index b03457881c4b5aed70daee7ded2c182cc238d3b7..87d2d7b38ce90742a0a1b9d3ccda2e0271c1f5fb 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/i2c.h>
 #include <linux/platform_data/i2c-nomadik.h>
@@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev)
        regulator_put(prox_regulator);
 }
 
+void mop500_snowball_ethernet_clock_enable(void)
+{
+       struct clk *clk;
+
+       clk = clk_get_sys("fsmc", NULL);
+       if (!IS_ERR(clk))
+               clk_prepare_enable(clk);
+}
+
 static struct cryp_platform_data u8500_cryp1_platform_data = {
                .mem_to_engine = {
                                .dir = STEDMA40_MEM_TO_PERIPH,
@@ -683,6 +693,8 @@ static void __init snowball_init_machine(void)
        mop500_audio_init(parent);
        mop500_uart_init(parent);
 
+       mop500_snowball_ethernet_clock_enable();
+
        /* This board has full regulator constraints */
        regulator_has_full_constraints();
 }
index eaa605f5d90dc463ac62079e01d5db92e50473ca..d38951be70df01304745bcc1a62584b5c89522f3 100644 (file)
@@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void);
 void __init snowball_pinmaps_init(void);
 void __init hrefv60_pinmaps_init(void);
 void mop500_audio_init(struct device *parent);
+void mop500_snowball_ethernet_clock_enable(void);
 
 int __init mop500_uib_init(void);
 void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
index 19235cf7bbe3f33c4fe4d63eee73d779624dbb69..f1a581844372881e7e0d54aac4b3465495a5941d 100644 (file)
@@ -312,9 +312,10 @@ static void __init u8500_init_machine(void)
        /* Pinmaps must be in place before devices register */
        if (of_machine_is_compatible("st-ericsson,mop500"))
                mop500_pinmaps_init();
-       else if (of_machine_is_compatible("calaosystems,snowball-a9500"))
+       else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
                snowball_pinmaps_init();
-       else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
+               mop500_snowball_ethernet_clock_enable();
+       } else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
                hrefv60_pinmaps_init();
        else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}
                /* TODO: Add pinmaps for ccu9540 board. */
index c2f37390308a20b835d653dfbb0f64f28cc8ce40..c465faca51b06b05ed50c27962b8b169198ea909 100644 (file)
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
        int lockregs;
        int i;
 
-       switch (cache_id) {
+       switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                lockregs = 8;
                break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
        if (cache_id_part_number_from_dt)
                cache_id = cache_id_part_number_from_dt;
        else
-               cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
-                       & L2X0_CACHE_ID_PART_MASK;
+               cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
        aux &= aux_mask;
        aux |= aux_val;
 
        /* Determine the number of ways */
-       switch (cache_id) {
+       switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                if (aux & (1 << 16))
                        ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
                .flush_all   = l2x0_flush_all,
                .inv_all     = l2x0_inv_all,
                .disable     = l2x0_disable,
-               .set_debug   = pl310_set_debug,
        },
 };
 
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
                data->save();
 
        of_init = true;
-       l2x0_init(l2x0_base, aux_val, aux_mask);
-
        memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+       l2x0_init(l2x0_base, aux_val, aux_mask);
 
        return 0;
 }
index a5a4b2bc42ba353e7e0ae94461cf4d8691b2b68a..2ac37372ef52f4ba4db642d39bef349798f1785a 100644 (file)
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
                local_flush_bp_all();
                local_flush_tlb_all();
+               dummy_flush_tlb_a15_erratum();
        }
 
        atomic64_set(&per_cpu(active_asids, cpu), asid);
index e95a996ab78f6c7d995d859f6010ef5d8df471ed..78978945492a2a105f1a9a2d18e81cd76dc1e7b6 100644 (file)
@@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
-                                     unsigned long end, phys_addr_t phys,
-                                     const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+                       unsigned long end, phys_addr_t phys,
+                       const struct mem_type *type)
 {
-       pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
        /*
-        * Try a section mapping - end, addr and phys must all be aligned
-        * to a section boundary.  Note that PMDs refer to the individual
-        * L1 entries, whereas PGDs refer to a group of L1 entries making
-        * up one logical pointer to an L2 table.
+        * In classic MMU format, puds and pmds are folded into
+        * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+        * group of L1 entries making up one logical pointer to
+        * an L2 table (2MB), whereas PMDs refer to the individual
+        * L1 entries (1MB). Hence increment to get the correct
+        * offset for odd 1MB sections.
+        * (See arch/arm/include/asm/pgtable-2level.h)
         */
-       if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-               pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-               if (addr & SECTION_SIZE)
-                       pmd++;
+       if (addr & SECTION_SIZE)
+               pmd++;
 #endif
+       do {
+               *pmd = __pmd(phys | type->prot_sect);
+               phys += SECTION_SIZE;
+       } while (pmd++, addr += SECTION_SIZE, addr != end);
 
-               do {
-                       *pmd = __pmd(phys | type->prot_sect);
-                       phys += SECTION_SIZE;
-               } while (pmd++, addr += SECTION_SIZE, addr != end);
+       flush_pmd_entry(pmd);
+}
 
-               flush_pmd_entry(p);
-       } else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+                                     unsigned long end, phys_addr_t phys,
+                                     const struct mem_type *type)
+{
+       pmd_t *pmd = pmd_offset(pud, addr);
+       unsigned long next;
+
+       do {
                /*
-                * No need to loop; pte's aren't interested in the
-                * individual L1 entries.
+                * With LPAE, we must loop to map all
+                * the pmds for the given range.
                 */
-               alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-       }
+               next = pmd_addr_end(addr, end);
+
+               /*
+                * Try a section mapping - addr, next and phys must all be
+                * aligned to a section boundary.
+                */
+               if (type->prot_sect &&
+                               ((addr | next | phys) & ~SECTION_MASK) == 0) {
+                       map_init_section(pmd, addr, next, phys, type);
+               } else {
+                       alloc_init_pte(pmd, addr, next,
+                                               __phys_to_pfn(phys), type);
+               }
+
+               phys += next - addr;
+
+       } while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
        do {
                next = pud_addr_end(addr, end);
-               alloc_init_section(pud, addr, next, phys, type);
+               alloc_init_pmd(pud, addr, next, phys, type);
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
 }
index 3a3c015f8d5c33cb99357995b6d35790037e7f94..f584d3f5b37c5855782a0259c327bc6347bba89a 100644 (file)
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
 __v7_ca7mp_proc_info:
        .long   0x410fc070
        .long   0xff0ffff0
-       __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+       __v7_proc __v7_ca7mp_setup
        .size   __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
 
        /*
@@ -430,9 +430,24 @@ __v7_ca7mp_proc_info:
 __v7_ca15mp_proc_info:
        .long   0x410fc0f0
        .long   0xff0ffff0
-       __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+       __v7_proc __v7_ca15mp_setup
        .size   __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
+       /*
+        * Qualcomm Inc. Krait processors.
+        */
+       .type   __krait_proc_info, #object
+__krait_proc_info:
+       .long   0x510f0400              @ Required ID value
+       .long   0xff0ffc00              @ Mask for ID
+       /*
+        * Some Krait processors don't indicate support for SDIV and UDIV
+        * instructions in the ARM instruction set, even though they actually
+        * do support them.
+        */
+       __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+       .size   __krait_proc_info, . - __krait_proc_info
+
        /*
         * Match any ARMv7 processor core.
         */
index 224b44ab534ee4682ffb004e50c3d92b86a39a85..70b8cd4021c46cb800cbf76bd21a001e2e73e86c 100644 (file)
@@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
 {
        unsigned long size, mask;
-       bool page64k = IS_ENABLED(ARM64_64K_PAGES);
+       bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
index e34f565f595ae2d54613309af8013b2e735e9881..6f7dc8b7b35ccc8ba435caa08ed1a8ab8e7863a5 100644 (file)
@@ -291,7 +291,6 @@ cpu_idle (void)
                }
 
                if (!need_resched()) {
-                       void (*idle)(void);
 #ifdef CONFIG_SMP
                        min_xtp();
 #endif
@@ -299,9 +298,7 @@ cpu_idle (void)
                        if (mark_idle)
                                (*mark_idle)(1);
 
-                       if (!idle)
-                               idle = default_idle;
-                       (*idle)();
+                       default_idle();
                        if (mark_idle)
                                (*mark_idle)(0);
 #ifdef CONFIG_SMP
index cd2e21ff562af434803045c048dbbfbcd28a50cb..a6fdd16439036b23a68ebb05a896619950d9ab8a 100644 (file)
@@ -18,7 +18,7 @@ config MIPS
        select HAVE_KRETPROBES
        select HAVE_DEBUG_KMEMLEAK
        select ARCH_BINFMT_ELF_RANDOMIZE_PIE
-       select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
        select RTC_LIB if !MACH_LOONGSON
        select GENERIC_ATOMIC64 if !64BIT
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -1493,7 +1493,6 @@ config CPU_XLP
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_64BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
-       select CPU_HAS_LLSC
        select WEAK_ORDERING
        select WEAK_REORDERING_BEYOND_LLSC
        select CPU_HAS_PREFETCH
index ed1949c295087bf81ef951539c08400507e9d603..9aa7d44898ed11cff9c70112e0e142923e27f2f4 100644 (file)
@@ -745,10 +745,7 @@ void __init board_prom_init(void)
                strcpy(cfe_version, "unknown");
        printk(KERN_INFO PFX "CFE version: %s\n", cfe_version);
 
-       if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) {
-               printk(KERN_ERR PFX "invalid nvram checksum\n");
-               return;
-       }
+       bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);
 
        board_name = bcm63xx_nvram_get_name();
        /* find board by name */
index 620611680839e42d1aa16741aca00526f352e39f..a4b8864f93072d8e305d6a7bcca4953b2cbcc4a5 100644 (file)
@@ -38,7 +38,7 @@ struct bcm963xx_nvram {
 static struct bcm963xx_nvram nvram;
 static int mac_addr_used;
 
-int __init bcm63xx_nvram_init(void *addr)
+void __init bcm63xx_nvram_init(void *addr)
 {
        unsigned int check_len;
        u32 crc, expected_crc;
@@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr)
        crc = crc32_le(~0, (u8 *)&nvram, check_len);
 
        if (crc != expected_crc)
-               return -EINVAL;
-
-       return 0;
+               pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
+                       expected_crc, crc);
 }
 
 u8 *bcm63xx_nvram_get_name(void)
index 314231be788cd7365c55e3501187600296932c38..35e18e98beb96b04151999aac8bf037129efc324 100644 (file)
@@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void)
        return board_register_devices();
 }
 
-device_initcall(bcm63xx_register_devices);
+arch_initcall(bcm63xx_register_devices);
index 62d6a3b4d3b7b512550e558001934f3e55a7917f..4e0b6bc1165edcbae2f44663390daa2c63c017d9 100644 (file)
@@ -9,10 +9,8 @@
  *
  * Initializes the local nvram copy from the target address and checks
  * its checksum.
- *
- * Returns 0 on success.
  */
-int __init bcm63xx_nvram_init(void *nvram);
+void bcm63xx_nvram_init(void *nvram);
 
 /**
  * bcm63xx_nvram_get_name() - returns the board name according to nvram
index d9c82841903775734d6a462de251deaba1058715..193c0912d38e651519b8fb8b7bbc9fa31d7893e8 100644 (file)
 /* #define cpu_has_prefetch    ? */
 #define cpu_has_mcheck         1
 /* #define cpu_has_ejtag       ? */
-#ifdef CONFIG_CPU_HAS_LLSC
 #define cpu_has_llsc           1
-#else
-#define cpu_has_llsc           0
-#endif
 /* #define cpu_has_vtag_icache ? */
 /* #define cpu_has_dc_aliases  ? */
 /* #define cpu_has_ic_fills_f_dc ? */
index 12b70c25906a0d3a32d095b52d3766bc2285e115..0da44d422f5b0242380bfc0dde4973b547c1900e 100644 (file)
@@ -1166,7 +1166,10 @@ do {                                                                     \
        unsigned int __dspctl;                                          \
                                                                        \
        __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
        "       rddsp   %0, %x1                                 \n"     \
+       "       .set pop                                        \n"     \
        : "=r" (__dspctl)                                               \
        : "i" (mask));                                                  \
        __dspctl;                                                       \
@@ -1175,30 +1178,198 @@ do {                                                                   \
 #define wrdsp(val, mask)                                               \
 do {                                                                   \
        __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
        "       wrdsp   %0, %x1                                 \n"     \
+       "       .set pop                                        \n"     \
        :                                                               \
        : "r" (val), "i" (mask));                                       \
 } while (0)
 
-#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;})
-#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;})
-#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;})
-#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;})
-
-#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;})
-#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;})
-#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;})
-#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;})
-
-#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x))
-#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x))
-#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x))
-#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x))
-
-#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x))
-#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x))
-#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x))
-#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x))
+#define mflo0()                                                                \
+({                                                                     \
+       long mflo0;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mflo %0, $ac0                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mflo0));                                                \
+       mflo0;                                                          \
+})
+
+#define mflo1()                                                                \
+({                                                                     \
+       long mflo1;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mflo %0, $ac1                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mflo1));                                                \
+       mflo1;                                                          \
+})
+
+#define mflo2()                                                                \
+({                                                                     \
+       long mflo2;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mflo %0, $ac2                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mflo2));                                                \
+       mflo2;                                                          \
+})
+
+#define mflo3()                                                                \
+({                                                                     \
+       long mflo3;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mflo %0, $ac3                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mflo3));                                                \
+       mflo3;                                                          \
+})
+
+#define mfhi0()                                                                \
+({                                                                     \
+       long mfhi0;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mfhi %0, $ac0                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mfhi0));                                                \
+       mfhi0;                                                          \
+})
+
+#define mfhi1()                                                                \
+({                                                                     \
+       long mfhi1;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mfhi %0, $ac1                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mfhi1));                                                \
+       mfhi1;                                                          \
+})
+
+#define mfhi2()                                                                \
+({                                                                     \
+       long mfhi2;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mfhi %0, $ac2                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mfhi2));                                                \
+       mfhi2;                                                          \
+})
+
+#define mfhi3()                                                                \
+({                                                                     \
+       long mfhi3;                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mfhi %0, $ac3                                   \n"     \
+       "       .set pop                                        \n"     \
+       : "=r" (mfhi3));                                                \
+       mfhi3;                                                          \
+})
+
+
+#define mtlo0(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mtlo %0, $ac0                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mtlo1(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mtlo %0, $ac1                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mtlo2(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mtlo %0, $ac2                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mtlo3(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mtlo %0, $ac3                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mthi0(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mthi %0, $ac0                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mthi1(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mthi %0, $ac1                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mthi2(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mthi %0, $ac2                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
+
+#define mthi3(x)                                                       \
+({                                                                     \
+       __asm__(                                                        \
+       "       .set push                                       \n"     \
+       "       .set dsp                                        \n"     \
+       "       mthi %0, $ac3                                   \n"     \
+       "       .set pop                                        \n"     \
+       :                                                               \
+       : "r" (x));                                                     \
+})
 
 #else
 
index 197f6367c201c122171b7e81cbcebc328a683a7f..8efe5a9e2c3e68a1ab5cffe718fe58bbd815abb4 100644 (file)
@@ -21,6 +21,6 @@
 #include <asm/sigcontext.h>
 #include <asm/siginfo.h>
 
-#define __ARCH_HAS_ODD_SIGACTION
+#define __ARCH_HAS_IRIX_SIGACTION
 
 #endif /* _ASM_SIGNAL_H */
index f81d98f6184c2f89ff7f69530247a2b6181245fd..de75fb50562bfec163b876e312e9b6a0d72a7c23 100644 (file)
@@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS)      += perf_event_mipsxx.o
 obj-$(CONFIG_JUMP_LABEL)       += jump_label.o
 
 #
-# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe
-# to enable DSP assembler support here even if the MIPS Release 2 CPU we
-# are targetting does not support DSP because all code-paths making use of
-# it properly check that the running CPU *actually does* support these
-# instructions.
+# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
+# safe to unconditionally use the assembler -mdsp / -mdspr2 switches
+# here because the compiler may use DSP ASE instructions (such as lwx) in
+# code paths where we cannot check that the CPU we are running on supports it.
+# Proper abstraction using HAVE_AS_DSP and macros is done in
+# arch/mips/include/asm/mipsregs.h.
 #
 ifeq ($(CONFIG_CPU_MIPSR2), y)
 CFLAGS_DSP                     = -DHAVE_AS_DSP
 
-#
-# Check if assembler supports DSP ASE
-#
-ifeq ($(call cc-option-yn,-mdsp), y)
-CFLAGS_DSP                     += -mdsp
-endif
-
-#
-# Check if assembler supports DSP ASE Rev2
-#
-ifeq ($(call cc-option-yn,-mdspr2), y)
-CFLAGS_DSP                     += -mdspr2
-endif
-
 CFLAGS_signal.o                        = $(CFLAGS_DSP)
 CFLAGS_signal32.o              = $(CFLAGS_DSP)
 CFLAGS_process.o               = $(CFLAGS_DSP)
index 6bfccc227a95f850d31c64ce1b6e6c863f62ba77..d069a19112e8b2e90bc59092bf05abe631df028d 100644 (file)
@@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                c->tlbsize = 48;
                break;
        case PRID_IMP_VR41XX:
+               set_isa(c, MIPS_CPU_ISA_III);
+               c->options = R4K_OPTS;
+               c->tlbsize = 32;
                switch (c->processor_id & 0xf0) {
                case PRID_REV_VR4111:
                        c->cputype = CPU_VR4111;
@@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                                __cpu_name[cpu] = "NEC VR4131";
                        } else {
                                c->cputype = CPU_VR4133;
+                               c->options |= MIPS_CPU_LLSC;
                                __cpu_name[cpu] = "NEC VR4133";
                        }
                        break;
@@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                        __cpu_name[cpu] = "NEC Vr41xx";
                        break;
                }
-               set_isa(c, MIPS_CPU_ISA_III);
-               c->options = R4K_OPTS;
-               c->tlbsize = 32;
                break;
        case PRID_IMP_R4300:
                c->cputype = CPU_R4300;
index 8eeee1c860c08cd5ea0228e8387668cc2aaa0cc5..db9655f08892d73738664e45a48e088f941b263a 100644 (file)
@@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
                err = compat_sys_shmctl(first, second, compat_ptr(ptr));
                break;
        default:
-               err = -EINVAL;
+               err = -ENOSYS;
                break;
        }
 
index 135c4aadccbe35d9a49415170dce3d2b9e05a697..7a54f74b7818ad402a70221eb2090dc91f5643f8 100644 (file)
@@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_mips_r) {
                seq_printf(m, "isa\t\t\t:");
                if (cpu_has_mips_1)
-                       seq_printf(m, "%s", "mips1");
+                       seq_printf(m, "%s", " mips1");
                if (cpu_has_mips_2)
                        seq_printf(m, "%s", " mips2");
                if (cpu_has_mips_3)
index 81f1dcfdcab8c212ebbac6c5159291dc8b8e1162..a64daee740ee0414ee6a8db474b32893ae51ab1d 100644 (file)
@@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr,
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
-       unsigned long res;
+       int res;
 
        a += nr >> SZLONG_LOG;
        mask = 1UL << bit;
        raw_local_irq_save(flags);
-       res = (mask & *a);
+       res = (mask & *a) != 0;
        *a |= mask;
        raw_local_irq_restore(flags);
        return res;
@@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr,
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
-       unsigned long res;
+       int res;
 
        a += nr >> SZLONG_LOG;
        mask = 1UL << bit;
        raw_local_irq_save(flags);
-       res = (mask & *a);
+       res = (mask & *a) != 0;
        *a |= mask;
        raw_local_irq_restore(flags);
        return res;
@@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
-       unsigned long res;
+       int res;
 
        a += nr >> SZLONG_LOG;
        mask = 1UL << bit;
        raw_local_irq_save(flags);
-       res = (mask & *a);
+       res = (mask & *a) != 0;
        *a &= ~mask;
        raw_local_irq_restore(flags);
        return res;
@@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
-       unsigned long res;
+       int res;
 
        a += nr >> SZLONG_LOG;
        mask = 1UL << bit;
        raw_local_irq_save(flags);
-       res = (mask & *a);
+       res = (mask & *a) != 0;
        *a ^= mask;
        raw_local_irq_restore(flags);
        return res;
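A short worked example of why the return value is normalised in these hunks (numbers chosen for illustration only): the helpers return int, and with nr = 40 the mask is 1UL << 40. The old unsigned long result of (mask & *a) could be 0x10000000000, which truncates to 0 when narrowed to int, wrongly reporting the bit as clear. Comparing against zero first collapses the result to 0 or 1 before the narrowing conversion.

	/* illustration, not part of the patch */
	unsigned long word = 1UL << 40;			/* bit 40 set */
	unsigned long mask = 1UL << 40;
	int old_res = (int)(mask & word);		/* 0 -- high bits lost */
	int new_res = (mask & word) != 0;		/* 1 -- correct */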
index 507147aebd417d612633e1104b83fd8bff08cbce..a6adffbb4e5f0a5ccc5ec268c18c718b69c5a8da 100644 (file)
@@ -270,7 +270,7 @@ LEAF(csum_partial)
 #endif
 
        /* odd buffer alignment? */
-#ifdef CPU_MIPSR2
+#ifdef CONFIG_CPU_MIPSR2
        wsbh    v1, sum
        movn    sum, v1, t7
 #else
@@ -670,7 +670,7 @@ EXC(        sb      t0, NBYTES-2(dst), .Ls_exc)
        addu    sum, v1
 #endif
 
-#ifdef CPU_MIPSR2
+#ifdef CONFIG_CPU_MIPSR2
        wsbh    v1, sum
        movn    sum, v1, odd
 #else
index f3eab8594d9f873432deb94c770db354f20ffc76..d44a571e45a79dae9b7976c7e281c93021b2847e 100644 (file)
 #include <asm/code-patching.h>
 #include <asm/machdep.h>
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
 extern void epapr_ev_idle(void);
 extern u32 epapr_ev_idle_start[];
+#endif
 
 bool epapr_paravirt_enabled;
 
@@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
 
        for (i = 0; i < (len / 4); i++) {
                patch_instruction(epapr_hypercall_start + i, insts[i]);
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
                patch_instruction(epapr_ev_idle_start + i, insts[i]);
+#endif
        }
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
        if (of_get_property(hyper_node, "has-idle", NULL))
                ppc_md.power_save = epapr_ev_idle;
+#endif
 
        epapr_paravirt_enabled = true;
 
index 200afa5bcfb73da1efa111c5b22ad62b09b3bb74..56bd92362ce131e8742f33d56ff8613e05bcced0 100644 (file)
@@ -1066,78 +1066,6 @@ unrecov_user_slb:
 #endif /* __DISABLED__ */
 
 
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(slb_miss_realmode)
-       mflr    r10
-#ifdef CONFIG_RELOCATABLE
-       mtctr   r11
-#endif
-
-       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
-       std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
-
-       bl      .slb_allocate_realmode
-
-       /* All done -- return from exception. */
-
-       ld      r10,PACA_EXSLB+EX_LR(r13)
-       ld      r3,PACA_EXSLB+EX_R3(r13)
-       lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
-
-       mtlr    r10
-
-       andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
-       beq-    2f
-
-.machine       push
-.machine       "power4"
-       mtcrf   0x80,r9
-       mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
-.machine       pop
-
-       RESTORE_PPR_PACA(PACA_EXSLB, r9)
-       ld      r9,PACA_EXSLB+EX_R9(r13)
-       ld      r10,PACA_EXSLB+EX_R10(r13)
-       ld      r11,PACA_EXSLB+EX_R11(r13)
-       ld      r12,PACA_EXSLB+EX_R12(r13)
-       ld      r13,PACA_EXSLB+EX_R13(r13)
-       rfid
-       b       .       /* prevent speculative execution */
-
-2:     mfspr   r11,SPRN_SRR0
-       ld      r10,PACAKBASE(r13)
-       LOAD_HANDLER(r10,unrecov_slb)
-       mtspr   SPRN_SRR0,r10
-       ld      r10,PACAKMSR(r13)
-       mtspr   SPRN_SRR1,r10
-       rfid
-       b       .
-
-unrecov_slb:
-       EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
-       DISABLE_INTS
-       bl      .save_nvgprs
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .unrecoverable_exception
-       b       1b
-
-
-#ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
-       andc    r9,r9,r10
-       std     r9,TI_LOCAL_FLAGS(r11)
-       ld      r10,_LINK(r1)           /* make idle task do the */
-       std     r10,_NIP(r1)            /* equivalent of a blr */
-       blr
-#endif
-
        .align  7
        .globl alignment_common
 alignment_common:
@@ -1335,6 +1263,78 @@ _GLOBAL(opal_mc_secondary_handler)
 #endif /* CONFIG_PPC_POWERNV */
 
 
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contain the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+       mflr    r10
+#ifdef CONFIG_RELOCATABLE
+       mtctr   r11
+#endif
+
+       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
+       std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
+
+       bl      .slb_allocate_realmode
+
+       /* All done -- return from exception. */
+
+       ld      r10,PACA_EXSLB+EX_LR(r13)
+       ld      r3,PACA_EXSLB+EX_R3(r13)
+       lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
+
+       mtlr    r10
+
+       andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
+       beq-    2f
+
+.machine       push
+.machine       "power4"
+       mtcrf   0x80,r9
+       mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
+.machine       pop
+
+       RESTORE_PPR_PACA(PACA_EXSLB, r9)
+       ld      r9,PACA_EXSLB+EX_R9(r13)
+       ld      r10,PACA_EXSLB+EX_R10(r13)
+       ld      r11,PACA_EXSLB+EX_R11(r13)
+       ld      r12,PACA_EXSLB+EX_R12(r13)
+       ld      r13,PACA_EXSLB+EX_R13(r13)
+       rfid
+       b       .       /* prevent speculative execution */
+
+2:     mfspr   r11,SPRN_SRR0
+       ld      r10,PACAKBASE(r13)
+       LOAD_HANDLER(r10,unrecov_slb)
+       mtspr   SPRN_SRR0,r10
+       ld      r10,PACAKMSR(r13)
+       mtspr   SPRN_SRR1,r10
+       rfid
+       b       .
+
+unrecov_slb:
+       EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+       DISABLE_INTS
+       bl      .save_nvgprs
+1:     addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .unrecoverable_exception
+       b       1b
+
+
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+       andc    r9,r9,r10
+       std     r9,TI_LOCAL_FLAGS(r11)
+       ld      r10,_LINK(r1)           /* make idle task do the */
+       std     r10,_NIP(r1)            /* equivalent of a blr */
+       blr
+#endif
+
 /*
  * Hash table stuff
  */
index 4a2930844d43a078492732cf156d392ac10fc29e..4a5443118cfb039d4b5e53f63b7470c292c4a431 100644 (file)
@@ -344,6 +344,7 @@ extern unsigned long MODULES_END;
 #define _REGION3_ENTRY_CO      0x100   /* change-recording override        */
 
 /* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address       */
 #define _SEGMENT_ENTRY_ORIGIN  ~0x7ffUL/* segment table origin             */
 #define _SEGMENT_ENTRY_RO      0x200   /* page protection bit              */
 #define _SEGMENT_ENTRY_INV     0x20    /* invalid segment table entry      */
@@ -1531,7 +1532,8 @@ extern int s390_enable_sie(void);
 /*
  * No page table caches to initialise
  */
-#define pgtable_cache_init()   do { } while (0)
+static inline void pgtable_cache_init(void) { }
+static inline void check_pgt_cache(void) { }
 
 #include <asm-generic/pgtable.h>
 
index dff631d34b45b2630551813454ede86ba94dca84..466fb3383960442b7624e51990536d2cf0b8c65d 100644 (file)
@@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to,
  * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address
  * contains the (negative) exception code.
  */
-static __always_inline unsigned long follow_table(struct mm_struct *mm,
-                                                 unsigned long addr, int write)
+#ifdef CONFIG_64BIT
+static unsigned long follow_table(struct mm_struct *mm,
+                                 unsigned long address, int write)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ptep;
+       unsigned long *table = (unsigned long *)__pa(mm->pgd);
+
+       switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
+       case _ASCE_TYPE_REGION1:
+               table = table + ((address >> 53) & 0x7ff);
+               if (unlikely(*table & _REGION_ENTRY_INV))
+                       return -0x39UL;
+               table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+       case _ASCE_TYPE_REGION2:
+               table = table + ((address >> 42) & 0x7ff);
+               if (unlikely(*table & _REGION_ENTRY_INV))
+                       return -0x3aUL;
+               table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+       case _ASCE_TYPE_REGION3:
+               table = table + ((address >> 31) & 0x7ff);
+               if (unlikely(*table & _REGION_ENTRY_INV))
+                       return -0x3bUL;
+               table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+       case _ASCE_TYPE_SEGMENT:
+               table = table + ((address >> 20) & 0x7ff);
+               if (unlikely(*table & _SEGMENT_ENTRY_INV))
+                       return -0x10UL;
+               if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
+                       if (write && (*table & _SEGMENT_ENTRY_RO))
+                               return -0x04UL;
+                       return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
+                               (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
+               }
+               table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+       }
+       table = table + ((address >> 12) & 0xff);
+       if (unlikely(*table & _PAGE_INVALID))
+               return -0x11UL;
+       if (write && (*table & _PAGE_RO))
+               return -0x04UL;
+       return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
+}
 
-       pgd = pgd_offset(mm, addr);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               return -0x3aUL;
+#else /* CONFIG_64BIT */
 
-       pud = pud_offset(pgd, addr);
-       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-               return -0x3bUL;
+static unsigned long follow_table(struct mm_struct *mm,
+                                 unsigned long address, int write)
+{
+       unsigned long *table = (unsigned long *)__pa(mm->pgd);
 
-       pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       table = table + ((address >> 20) & 0x7ff);
+       if (unlikely(*table & _SEGMENT_ENTRY_INV))
                return -0x10UL;
-       if (pmd_large(*pmd)) {
-               if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
-                       return -0x04UL;
-               return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
-       }
-       if (unlikely(pmd_bad(*pmd)))
-               return -0x10UL;
-
-       ptep = pte_offset_map(pmd, addr);
-       if (!pte_present(*ptep))
+       table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+       table = table + ((address >> 12) & 0xff);
+       if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
-       if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
+       if (write && (*table & _PAGE_RO))
                return -0x04UL;
-
-       return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
+       return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
 
+#endif /* CONFIG_64BIT */
+
 static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                                             size_t n, int write_user)
 {
@@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
 
 static size_t clear_user_pt(size_t n, void __user *to)
 {
-       void *zpage = &empty_zero_page;
+       void *zpage = (void *) empty_zero_page;
        long done, size, ret;
 
        done = 0;
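A minimal sketch of the calling convention described in the follow_table() comment above (demo_resolve() is an illustrative assumption, not code from this patch): any returned value of -4095 or above, i.e. one for which IS_ERR_VALUE() is true, is a negative exception code rather than a physical address.

	static long demo_resolve(struct mm_struct *mm, unsigned long uaddr, int write)
	{
		unsigned long paddr = follow_table(mm, uaddr, write);

		if (IS_ERR_VALUE(paddr))
			return (long)paddr;	/* negative exception code, e.g. -0x11 */

		/* paddr is the physical address backing uaddr */
		return 0;
	}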
index d1e15f7b59c68ab54ee62a137443270287f59f34..7a5aa1a7864e2371900e5ccba2d2a0f507fa834e 100644 (file)
@@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
-/*
- * Note that the kernel can potentially support other compression
- * techniques than gz, though we don't do so by default.  If we ever
- * decide to do so we can either look for other filename extensions,
- * or just allow a file with this name to be compressed with an
- * arbitrary compressor (somewhat counterintuitively).
- */
 static int __initdata set_initramfs_file;
-static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
+static char __initdata initramfs_file[128] = "initramfs";
 
 static int __init setup_initramfs_file(char *str)
 {
@@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str)
 early_param("initramfs_file", setup_initramfs_file);
 
 /*
- * We look for an "initramfs.cpio.gz" file in the hvfs.
- * If there is one, we allocate some memory for it and it will be
- * unpacked to the initramfs.
+ * We look for a file called "initramfs" in the hvfs.  If there is one, we
+ * allocate some memory for it and it will be unpacked to the initramfs.
+ * If it's compressed, the initrd code will uncompress it first.
  */
 static void __init load_hv_initrd(void)
 {
@@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void)
 
        fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
        if (fd == HV_ENOENT) {
-               if (set_initramfs_file)
+               if (set_initramfs_file) {
                        pr_warning("No such hvfs initramfs file '%s'\n",
                                   initramfs_file);
-               return;
+                       return;
+               } else {
+                       /* Try old backwards-compatible name. */
+                       fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
+                       if (fd == HV_ENOENT)
+                               return;
+               }
        }
        BUG_ON(fd < 0);
        stat = hv_fs_fstat(fd);
index c20d1ce62dc6a0a236280ab51b7ac8386fb1c32b..e709884d0ef9edbdd0b7228f180814bef1c0b174 100644 (file)
@@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
        return _hypercall3(int, console_io, cmd, count, str);
 }
 
-extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
+extern int __must_check xen_physdev_op_compat(int, void *);
 
 static inline int
 HYPERVISOR_physdev_op(int cmd, void *arg)
 {
        int rc = _hypercall2(int, physdev_op, cmd, arg);
        if (unlikely(rc == -ENOSYS))
-               rc = HYPERVISOR_physdev_op_compat(cmd, arg);
+               rc = xen_physdev_op_compat(cmd, arg);
        return rc;
 }
 
index 892ce40a7470515543e09d80115efae395717b0b..7a060f4b411f753ed065c389b705e7b2a010ab7b 100644 (file)
@@ -44,6 +44,7 @@
 #define SNB_C1_AUTO_UNDEMOTE           (1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE           (1UL << 28)
 
+#define MSR_PLATFORM_INFO              0x000000ce
 #define MSR_MTRRcap                    0x000000fe
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 #define MSR_IA32_BBL_CR_CTL3           0x0000011e
index 7890bc8389524d4a1e934e23b041ab1010abf4f7..d893e8ed8ac96559b2175b00c3d68d71658a4105 100644 (file)
@@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
        struct microcode_intel ***mc_saved;
 
        mc_saved = (struct microcode_intel ***)
-                  __pa_symbol(&mc_saved_data->mc_saved);
+                  __pa_nodebug(&mc_saved_data->mc_saved);
        for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
                struct microcode_intel *p;
 
                p = *(struct microcode_intel **)
-                       __pa(mc_saved_data->mc_saved + i);
-               mc_saved_tmp[i] = (struct microcode_intel *)__pa(p);
+                       __pa_nodebug(mc_saved_data->mc_saved + i);
+               mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
        }
 }
 #endif
@@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
        struct cpio_data cd;
        long offset = 0;
 #ifdef CONFIG_X86_32
-       char *p = (char *)__pa_symbol(ucode_name);
+       char *p = (char *)__pa_nodebug(ucode_name);
 #else
        char *p = ucode_name;
 #endif
@@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
        if (mc_intel == NULL)
                return;
 
-       delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info);
-       current_mc_date_p = (int *)__pa_symbol(&current_mc_date);
+       delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
+       current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
 
        *delay_ucode_info_p = 1;
        *current_mc_date_p = mc_intel->hdr.date;
@@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
 }
 #endif
 
-static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
-                                struct ucode_cpu_info *uci)
+static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
+                                          struct ucode_cpu_info *uci)
 {
        struct microcode_intel *mc_intel;
        unsigned int val[2];
@@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
 #ifdef CONFIG_X86_32
        struct boot_params *boot_params_p;
 
-       boot_params_p = (struct boot_params *)__pa_symbol(&boot_params);
+       boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
        ramdisk_image = boot_params_p->hdr.ramdisk_image;
        ramdisk_size  = boot_params_p->hdr.ramdisk_size;
        initrd_start_early = ramdisk_image;
        initrd_end_early = initrd_start_early + ramdisk_size;
 
        _load_ucode_intel_bsp(
-               (struct mc_saved_data *)__pa_symbol(&mc_saved_data),
-               (unsigned long *)__pa_symbol(&mc_saved_in_initrd),
+               (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+               (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
                initrd_start_early, initrd_end_early, &uci);
 #else
        ramdisk_image = boot_params.hdr.ramdisk_image;
@@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
        unsigned long *initrd_start_p;
 
        mc_saved_in_initrd_p =
-               (unsigned long *)__pa_symbol(mc_saved_in_initrd);
-       mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data);
-       initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start);
-       initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p);
+               (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+       mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+       initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+       initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
 #else
        mc_saved_data_p = &mc_saved_data;
        mc_saved_in_initrd_p = mc_saved_in_initrd;
index 05928aae911e0addaaaf1bd0fce5002ba3c9f96e..906fea3157919dcffabfe2bc4b23a1becb98aa7c 100644 (file)
@@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
        char c;
        unsigned zero_len;
 
-       for (; len; --len) {
+       for (; len; --len, to++) {
                if (__get_user_nocheck(c, from++, sizeof(char)))
                        break;
-               if (__put_user_nocheck(c, to++, sizeof(char)))
+               if (__put_user_nocheck(c, to, sizeof(char)))
                        break;
        }
 
index e8e34938c57d86be6ba133abb033fbd7616d8f9a..6afbb2ca9a0ac450f79c6628b7cd9cd7c2ceeccf 100644 (file)
@@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3)
        __xen_write_cr3(true, cr3);
 
        xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
-
-       pv_mmu_ops.write_cr3 = &xen_write_cr3;
 }
 #endif
 
@@ -2122,6 +2120,7 @@ static void __init xen_post_allocator_init(void)
 #endif
 
 #ifdef CONFIG_X86_64
+       pv_mmu_ops.write_cr3 = &xen_write_cr3;
        SetPagePinned(virt_to_page(level3_user_vsyscall));
 #endif
        xen_mark_init_mm_pinned();
index db8f1b5078570fe98afb8dd748636060511ba683..cc2b827a853cdea378f1c54edeadd65546b33651 100644 (file)
@@ -444,7 +444,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
-               *error_sector = bio->bi_sector;
+               *error_sector = bio->bi_sector;
 
        if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;
index 789cdea05893bb8e3420ede5d1aa65e7af408e1b..ae95ee6a58aad6d005affa10bb270bdf1008e986 100644 (file)
@@ -257,6 +257,7 @@ void delete_partition(struct gendisk *disk, int partno)
 
        hd_struct_put(part);
 }
+EXPORT_SYMBOL(delete_partition);
 
 static ssize_t whole_disk_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
index 1e5d8a40101e274f5d6ce2bfe1a24a486a671af7..fefc2ca7cc3e0199b5f98b707107d424dd40b8eb 100644 (file)
@@ -405,7 +405,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
                return rc;
        data_len = estatus->data_length;
        gdata = (struct acpi_hest_generic_data *)(estatus + 1);
-       while (data_len > sizeof(*gdata)) {
+       while (data_len >= sizeof(*gdata)) {
                gedata_len = gdata->error_data_length;
                if (gedata_len > data_len - sizeof(*gdata))
                        return -EINVAL;
index 0ac546d5e53f5b0f079792c71419dd07001fe892..5ff17306612771d04dd684020041fe61056906ab 100644 (file)
@@ -646,6 +646,7 @@ static void handle_root_bridge_insertion(acpi_handle handle)
 
 static void handle_root_bridge_removal(struct acpi_device *device)
 {
+       acpi_status status;
        struct acpi_eject_event *ej_event;
 
        ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
@@ -661,7 +662,9 @@ static void handle_root_bridge_removal(struct acpi_device *device)
        ej_event->device = device;
        ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
 
-       acpi_bus_hot_remove_device(ej_event);
+       status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
+       if (ACPI_FAILURE(status))
+               kfree(ej_event);
 }
 
 static void _handle_hotplug_event_root(struct work_struct *work)
@@ -676,8 +679,9 @@ static void _handle_hotplug_event_root(struct work_struct *work)
        handle = hp_work->handle;
        type = hp_work->type;
 
-       root = acpi_pci_find_root(handle);
+       acpi_scan_lock_acquire();
 
+       root = acpi_pci_find_root(handle);
        acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
 
        switch (type) {
@@ -711,6 +715,7 @@ static void _handle_hotplug_event_root(struct work_struct *work)
                break;
        }
 
+       acpi_scan_lock_release();
        kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
        kfree(buffer.pointer);
 }
index 24213033fbae2aee5f033c2a776d2b37084fc88e..9c1a435d10e69c8228516346832a8d2d2d242890 100644 (file)
@@ -193,6 +193,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
        },
        {
        .callback = init_nvs_nosave,
+       .ident = "Sony Vaio VGN-FW21M",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
+               },
+       },
+       {
+       .callback = init_nvs_nosave,
        .ident = "Sony Vaio VPCEB17FX",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
index e6732cf7c06eea7aedf62e383a390ef3e61c3fe2..79f4fca9877a579cd3b5c787fa7de4c1ed75612f 100644 (file)
@@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
                        base = 0;
 
                if (max < rbnode->base_reg + rbnode->blklen)
-                       end = rbnode->base_reg + rbnode->blklen - max;
+                       end = max - rbnode->base_reg + 1;
                else
                        end = rbnode->blklen;
 
index 3d2367501fd0521ee4e472df6126b6ea25ee09a3..d34adef1e63e2bee55aa3cd5a9e7f2d98912103b 100644 (file)
@@ -710,12 +710,12 @@ skip_format_initialization:
                }
        }
 
+       regmap_debugfs_init(map, config->name);
+
        ret = regcache_init(map, config);
        if (ret != 0)
                goto err_range;
 
-       regmap_debugfs_init(map, config->name);
-
        /* Add a devres resource for dev_get_regmap() */
        m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
        if (!m) {
@@ -943,8 +943,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                unsigned int ival;
                int val_bytes = map->format.val_bytes;
                for (i = 0; i < val_len / val_bytes; i++) {
-                       memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
-                       ival = map->format.parse_val(map->work_buf);
+                       ival = map->format.parse_val(val + (i * val_bytes));
                        ret = regcache_write(map, reg + (i * map->reg_stride),
                                             ival);
                        if (ret) {
@@ -1036,6 +1035,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                        kfree(async->work_buf);
                        kfree(async);
                }
+
+               return ret;
        }
 
        trace_regmap_hw_write_start(map->dev, reg,
index 5dc0daed8fac523f5d631c3cd4a022f6fb4c5aca..b81ddfea1da075ad3c1d0cbeb9ab7bf854593436 100644 (file)
@@ -532,11 +532,11 @@ config BLK_DEV_RBD
          If unsure, say N.
 
 config BLK_DEV_RSXX
-       tristate "RamSam PCIe Flash SSD Device Driver"
+       tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
        depends on PCI
        help
          Device driver for IBM's high speed PCIe SSD
-         storage devices: RamSan-70 and RamSan-80.
+         storage devices: FlashSystem-70 and FlashSystem-80.
 
          To compile this driver as a module, choose M here: the
          module will be called rsxx.
index 25ef5c014fcaa4255e0e3009991b6a9ac1ff18c7..92b6d7c51e39590b3780f17c88737009b7becbbf 100644 (file)
@@ -51,8 +51,9 @@ new_skb(ulong len)
 {
        struct sk_buff *skb;
 
-       skb = alloc_skb(len, GFP_ATOMIC);
+       skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
        if (skb) {
+               skb_reserve(skb, MAX_HEADER);
                skb_reset_mac_header(skb);
                skb_reset_network_header(skb);
                skb->protocol = __constant_htons(ETH_P_AOE);
index ade58bc8f3c4dee67f69edde4ce20b1ef18d7cc8..1c1b8e544aa250d38710b47c19353c9ef5773c6c 100644 (file)
@@ -4206,7 +4206,7 @@ static int cciss_find_cfgtables(ctlr_info_t *h)
        if (rc)
                return rc;
        h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
-               cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable));
+               cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
        if (!h->cfgtable)
                return -ENOMEM;
        rc = write_driver_ver_to_cfgtable(h->cfgtable);
index 747bb2af69dcc55fec530466a9f55d06914ba8f7..2c127f9c3f3bf9e5966172e5255efa3a0d35b595 100644 (file)
@@ -922,6 +922,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
                lo->lo_flags |= LO_FLAGS_PARTSCAN;
        if (lo->lo_flags & LO_FLAGS_PARTSCAN)
                ioctl_by_bdev(bdev, BLKRRPART, 0);
+
+       /* Grab the block_device to prevent its destruction after we
+        * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
+        */
+       bdgrab(bdev);
        return 0;
 
 out_clr:
@@ -1031,8 +1036,10 @@ static int loop_clr_fd(struct loop_device *lo)
        memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
        memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
        memset(lo->lo_file_name, 0, LO_NAME_SIZE);
-       if (bdev)
+       if (bdev) {
+               bdput(bdev);
                invalidate_bdev(bdev);
+       }
        set_capacity(lo->lo_disk, 0);
        loop_sysfs_exit(lo);
        if (bdev) {
@@ -1044,12 +1051,29 @@ static int loop_clr_fd(struct loop_device *lo)
        lo->lo_state = Lo_unbound;
        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
-       if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
-               ioctl_by_bdev(bdev, BLKRRPART, 0);
        lo->lo_flags = 0;
        if (!part_shift)
                lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
        mutex_unlock(&lo->lo_ctl_mutex);
+
+       /*
+        * Remove all partitions, since BLKRRPART won't remove user
+        * added partitions when max_part=0
+        */
+       if (bdev) {
+               struct disk_part_iter piter;
+               struct hd_struct *part;
+
+               mutex_lock_nested(&bdev->bd_mutex, 1);
+               invalidate_partition(bdev->bd_disk, 0);
+               disk_part_iter_init(&piter, bdev->bd_disk,
+                                       DISK_PITER_INCL_EMPTY);
+               while ((part = disk_part_iter_next(&piter)))
+                       delete_partition(bdev->bd_disk, part->partno);
+               disk_part_iter_exit(&piter);
+               mutex_unlock(&bdev->bd_mutex);
+       }
+
        /*
         * Need not hold lo_ctl_mutex to fput backing file.
         * Calling fput holding lo_ctl_mutex triggers a circular
@@ -1623,6 +1647,7 @@ static int loop_add(struct loop_device **l, int i)
                goto out_free_dev;
        i = err;
 
+       err = -ENOMEM;
        lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
        if (!lo->lo_queue)
                goto out_free_dev;
index 1788f491e0fb50e13cefb2f81c286d41721e75ef..076ae7f1b781e0b1586631b2708471ce98f978c1 100644 (file)
@@ -890,8 +890,10 @@ static int mg_probe(struct platform_device *plat_dev)
        gpio_direction_output(host->rst, 1);
 
        /* reset out pin */
-       if (!(prv_data->dev_attr & MG_DEV_MASK))
+       if (!(prv_data->dev_attr & MG_DEV_MASK)) {
+               err = -EINVAL;
                goto probe_err_3a;
+       }
 
        if (prv_data->dev_attr != MG_BOOT_DEV) {
                rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
index 11cc9522cdd439ff3db29825037c4f866bc28abb..92250af84e7d151a8da57ed2cf6d22d8e3313e9d 100644 (file)
@@ -4224,6 +4224,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
        dd->isr_workq = create_workqueue(dd->workq_name);
        if (!dd->isr_workq) {
                dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
+               rv = -ENOMEM;
                goto block_initialize_err;
        }
 
@@ -4282,7 +4283,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
        INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
 
        pci_set_master(pdev);
-       if (pci_enable_msi(pdev)) {
+       rv = pci_enable_msi(pdev);
+       if (rv) {
                dev_warn(&pdev->dev,
                        "Unable to enable MSI interrupt.\n");
                goto block_initialize_err;
index 6c81a4c040b987e75d4a8b0bb5e4ff65e2108dba..f556f8a8b3f9b476949c6133f39778c49502e380 100644 (file)
@@ -1264,6 +1264,32 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request)
        return atomic_read(&obj_request->done) != 0;
 }
 
+static void
+rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
+{
+       dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
+               obj_request, obj_request->img_request, obj_request->result,
+               obj_request->xferred, obj_request->length);
+       /*
+        * ENOENT means a hole in the image.  We zero-fill the
+        * entire length of the request.  A short read also implies
+        * zero-fill to the end of the request.  Either way we
+        * update the xferred count to indicate the whole request
+        * was satisfied.
+        */
+       BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
+       if (obj_request->result == -ENOENT) {
+               zero_bio_chain(obj_request->bio_list, 0);
+               obj_request->result = 0;
+               obj_request->xferred = obj_request->length;
+       } else if (obj_request->xferred < obj_request->length &&
+                       !obj_request->result) {
+               zero_bio_chain(obj_request->bio_list, obj_request->xferred);
+               obj_request->xferred = obj_request->length;
+       }
+       obj_request_done_set(obj_request);
+}
+
 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
 {
        dout("%s: obj %p cb %p\n", __func__, obj_request,
@@ -1284,23 +1310,10 @@ static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
 {
        dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
                obj_request->result, obj_request->xferred, obj_request->length);
-       /*
-        * ENOENT means a hole in the object.  We zero-fill the
-        * entire length of the request.  A short read also implies
-        * zero-fill to the end of the request.  Either way we
-        * update the xferred count to indicate the whole request
-        * was satisfied.
-        */
-       if (obj_request->result == -ENOENT) {
-               zero_bio_chain(obj_request->bio_list, 0);
-               obj_request->result = 0;
-               obj_request->xferred = obj_request->length;
-       } else if (obj_request->xferred < obj_request->length &&
-                       !obj_request->result) {
-               zero_bio_chain(obj_request->bio_list, obj_request->xferred);
-               obj_request->xferred = obj_request->length;
-       }
-       obj_request_done_set(obj_request);
+       if (obj_request->img_request)
+               rbd_img_obj_request_read_callback(obj_request);
+       else
+               obj_request_done_set(obj_request);
 }
 
 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
index f35cd0b71f7b5802ae521d046c8b582386e6c6f3..b1c53c0aa450c47ddd38bf2138bb0505f341fc61 100644 (file)
@@ -1,2 +1,2 @@
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
-rsxx-y := config.o core.o cregs.o dev.o dma.o
+rsxx-objs := config.o core.o cregs.o dev.o dma.o
index a295e7e9ee41c98613ded6756ff725a615b5bafb..10cd530d3e1026fcb270e1b402a3a900363a7765 100644 (file)
 #include "rsxx_priv.h"
 #include "rsxx_cfg.h"
 
-static void initialize_config(void *config)
+static void initialize_config(struct rsxx_card_cfg *cfg)
 {
-       struct rsxx_card_cfg *cfg = config;
-
        cfg->hdr.version = RSXX_CFG_VERSION;
 
        cfg->data.block_size        = RSXX_HW_BLK_SIZE;
        cfg->data.stripe_size       = RSXX_HW_BLK_SIZE;
-       cfg->data.vendor_id         = RSXX_VENDOR_ID_TMS_IBM;
+       cfg->data.vendor_id         = RSXX_VENDOR_ID_IBM;
        cfg->data.cache_order       = (-1);
        cfg->data.intr_coal.mode    = RSXX_INTR_COAL_DISABLED;
        cfg->data.intr_coal.count   = 0;
@@ -181,7 +179,7 @@ int rsxx_load_config(struct rsxx_cardinfo *card)
        } else {
                dev_info(CARD_TO_DEV(card),
                        "Initializing card configuration.\n");
-               initialize_config(card);
+               initialize_config(&card->config);
                st = rsxx_save_config(card);
                if (st)
                        return st;
index e5162487686aef6f429b86655e22e27c9a14cddd..5af21f2db29cd1246397e3b4eed8627ba314adf2 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
+#include <linux/delay.h>
 
 #include <linux/genhd.h>
 #include <linux/idr.h>
@@ -39,8 +40,8 @@
 
 #define NO_LEGACY 0
 
-MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
-MODULE_AUTHOR("IBM <support@ramsan.com>");
+MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
+MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
@@ -52,6 +53,13 @@ static DEFINE_IDA(rsxx_disk_ida);
 static DEFINE_SPINLOCK(rsxx_ida_lock);
 
 /*----------------- Interrupt Control & Handling -------------------*/
+
+static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
+{
+       card->isr_mask = 0;
+       card->ier_mask = 0;
+}
+
 static void __enable_intr(unsigned int *mask, unsigned int intr)
 {
        *mask |= intr;
@@ -71,7 +79,8 @@ static void __disable_intr(unsigned int *mask, unsigned int intr)
  */
 void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 {
-       if (unlikely(card->halt))
+       if (unlikely(card->halt) ||
+           unlikely(card->eeh_state))
                return;
 
        __enable_intr(&card->ier_mask, intr);
@@ -80,6 +89,9 @@ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 
 void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 {
+       if (unlikely(card->eeh_state))
+               return;
+
        __disable_intr(&card->ier_mask, intr);
        iowrite32(card->ier_mask, card->regmap + IER);
 }
@@ -87,7 +99,8 @@ void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
                                 unsigned int intr)
 {
-       if (unlikely(card->halt))
+       if (unlikely(card->halt) ||
+           unlikely(card->eeh_state))
                return;
 
        __enable_intr(&card->isr_mask, intr);
@@ -97,6 +110,9 @@ void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
 void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
                                  unsigned int intr)
 {
+       if (unlikely(card->eeh_state))
+               return;
+
        __disable_intr(&card->isr_mask, intr);
        __disable_intr(&card->ier_mask, intr);
        iowrite32(card->ier_mask, card->regmap + IER);
@@ -115,6 +131,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
        do {
                reread_isr = 0;
 
+               if (unlikely(card->eeh_state))
+                       break;
+
                isr = ioread32(card->regmap + ISR);
                if (isr == 0xffffffff) {
                        /*
@@ -161,9 +180,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
 }
 
 /*----------------- Card Event Handler -------------------*/
-static char *rsxx_card_state_to_str(unsigned int state)
+static const char * const rsxx_card_state_to_str(unsigned int state)
 {
-       static char *state_strings[] = {
+       static const char * const state_strings[] = {
                "Unknown", "Shutdown", "Starting", "Formatting",
                "Uninitialized", "Good", "Shutting Down",
                "Fault", "Read Only Fault", "dStroying"
@@ -304,6 +323,192 @@ static int card_shutdown(struct rsxx_cardinfo *card)
        return 0;
 }
 
+static int rsxx_eeh_frozen(struct pci_dev *dev)
+{
+       struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+       int i;
+       int st;
+
+       dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n");
+
+       card->eeh_state = 1;
+       rsxx_mask_interrupts(card);
+
+       /*
+        * We need to guarantee that the write for eeh_state and masking
+        * interrupts does not become reordered. This will prevent a possible
+        * race condition with the EEH code.
+        */
+       wmb();
+
+       pci_disable_device(dev);
+
+       st = rsxx_eeh_save_issued_dmas(card);
+       if (st)
+               return st;
+
+       rsxx_eeh_save_issued_creg(card);
+
+       for (i = 0; i < card->n_targets; i++) {
+               if (card->ctrl[i].status.buf)
+                       pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
+                                           card->ctrl[i].status.buf,
+                                           card->ctrl[i].status.dma_addr);
+               if (card->ctrl[i].cmd.buf)
+                       pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
+                                           card->ctrl[i].cmd.buf,
+                                           card->ctrl[i].cmd.dma_addr);
+       }
+
+       return 0;
+}
+
+static void rsxx_eeh_failure(struct pci_dev *dev)
+{
+       struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+       int i;
+
+       dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
+
+       card->eeh_state = 1;
+
+       for (i = 0; i < card->n_targets; i++)
+               del_timer_sync(&card->ctrl[i].activity_timer);
+
+       rsxx_eeh_cancel_dmas(card);
+}
+
+static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
+{
+       unsigned int status;
+       int iter = 0;
+
+       /* We need to wait for the hardware to reset */
+       while (iter++ < 10) {
+               status = ioread32(card->regmap + PCI_RECONFIG);
+
+               if (status & RSXX_FLUSH_BUSY) {
+                       ssleep(1);
+                       continue;
+               }
+
+               if (status & RSXX_FLUSH_TIMEOUT)
+                       dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
+               return 0;
+       }
+
+       /* Hardware failed resetting itself. */
+       return -1;
+}
+
+static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
+                                           enum pci_channel_state error)
+{
+       int st;
+
+       if (dev->revision < RSXX_EEH_SUPPORT)
+               return PCI_ERS_RESULT_NONE;
+
+       if (error == pci_channel_io_perm_failure) {
+               rsxx_eeh_failure(dev);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       st = rsxx_eeh_frozen(dev);
+       if (st) {
+               dev_err(&dev->dev, "Slot reset setup failed\n");
+               rsxx_eeh_failure(dev);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
+{
+       struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+       unsigned long flags;
+       int i;
+       int st;
+
+       dev_warn(&dev->dev,
+               "IBM FlashSystem PCI: recovering from slot reset.\n");
+
+       st = pci_enable_device(dev);
+       if (st)
+               goto failed_hw_setup;
+
+       pci_set_master(dev);
+
+       st = rsxx_eeh_fifo_flush_poll(card);
+       if (st)
+               goto failed_hw_setup;
+
+       rsxx_dma_queue_reset(card);
+
+       for (i = 0; i < card->n_targets; i++) {
+               st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
+               if (st)
+                       goto failed_hw_buffers_init;
+       }
+
+       if (card->config_valid)
+               rsxx_dma_configure(card);
+
+       /* Clears the ISR register from spurious interrupts */
+       st = ioread32(card->regmap + ISR);
+
+       card->eeh_state = 0;
+
+       st = rsxx_eeh_remap_dmas(card);
+       if (st)
+               goto failed_remap_dmas;
+
+       spin_lock_irqsave(&card->irq_lock, flags);
+       if (card->n_targets & RSXX_MAX_TARGETS)
+               rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
+       else
+               rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
+       spin_unlock_irqrestore(&card->irq_lock, flags);
+
+       rsxx_kick_creg_queue(card);
+
+       for (i = 0; i < card->n_targets; i++) {
+               spin_lock(&card->ctrl[i].queue_lock);
+               if (list_empty(&card->ctrl[i].queue)) {
+                       spin_unlock(&card->ctrl[i].queue_lock);
+                       continue;
+               }
+               spin_unlock(&card->ctrl[i].queue_lock);
+
+               queue_work(card->ctrl[i].issue_wq,
+                               &card->ctrl[i].issue_dma_work);
+       }
+
+       dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n");
+
+       return PCI_ERS_RESULT_RECOVERED;
+
+failed_hw_buffers_init:
+failed_remap_dmas:
+       for (i = 0; i < card->n_targets; i++) {
+               if (card->ctrl[i].status.buf)
+                       pci_free_consistent(card->dev,
+                                       STATUS_BUFFER_SIZE8,
+                                       card->ctrl[i].status.buf,
+                                       card->ctrl[i].status.dma_addr);
+               if (card->ctrl[i].cmd.buf)
+                       pci_free_consistent(card->dev,
+                                       COMMAND_BUFFER_SIZE8,
+                                       card->ctrl[i].cmd.buf,
+                                       card->ctrl[i].cmd.dma_addr);
+       }
+failed_hw_setup:
+       rsxx_eeh_failure(dev);
+       return PCI_ERS_RESULT_DISCONNECT;
+
+}
+
 /*----------------- Driver Initialization & Setup -------------------*/
 /* Returns:   0 if the driver is compatible with the device
             -1 if the driver is NOT compatible with the device */
@@ -383,6 +588,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 
        spin_lock_init(&card->irq_lock);
        card->halt = 0;
+       card->eeh_state = 0;
 
        spin_lock_irq(&card->irq_lock);
        rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
@@ -538,9 +744,6 @@ static void rsxx_pci_remove(struct pci_dev *dev)
        rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
        spin_unlock_irqrestore(&card->irq_lock, flags);
 
-       /* Prevent work_structs from re-queuing themselves. */
-       card->halt = 1;
-
        cancel_work_sync(&card->event_work);
 
        rsxx_destroy_dev(card);
@@ -549,6 +752,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
        spin_lock_irqsave(&card->irq_lock, flags);
        rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
        spin_unlock_irqrestore(&card->irq_lock, flags);
+
+       /* Prevent work_structs from re-queuing themselves. */
+       card->halt = 1;
+
        free_irq(dev->irq, card);
 
        if (!force_legacy)
@@ -592,11 +799,14 @@ static void rsxx_pci_shutdown(struct pci_dev *dev)
        card_shutdown(card);
 }
 
+static const struct pci_error_handlers rsxx_err_handler = {
+       .error_detected = rsxx_error_detected,
+       .slot_reset     = rsxx_slot_reset,
+};
+
 static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
-       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
-       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
-       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
-       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
+       {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
+       {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
        {0,},
 };
 
@@ -609,6 +819,7 @@ static struct pci_driver rsxx_pci_driver = {
        .remove         = rsxx_pci_remove,
        .suspend        = rsxx_pci_suspend,
        .shutdown       = rsxx_pci_shutdown,
+       .err_handler    = &rsxx_err_handler,
 };
 
 static int __init rsxx_core_init(void)
index 80bbe639fccd72d002d269f6486e58ace210aab9..4b5c020a0a65ad8831d75bf3407dd81b2d82b306 100644 (file)
@@ -58,7 +58,7 @@ static struct kmem_cache *creg_cmd_pool;
 #error Unknown endianness!!! Aborting...
 #endif
 
-static void copy_to_creg_data(struct rsxx_cardinfo *card,
+static int copy_to_creg_data(struct rsxx_cardinfo *card,
                              int cnt8,
                              void *buf,
                              unsigned int stream)
@@ -66,6 +66,9 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
        int i = 0;
        u32 *data = buf;
 
+       if (unlikely(card->eeh_state))
+               return -EIO;
+
        for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
                /*
                 * Firmware implementation makes it necessary to byte swap on
@@ -76,10 +79,12 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
                else
                        iowrite32(data[i], card->regmap + CREG_DATA(i));
        }
+
+       return 0;
 }
 
 
-static void copy_from_creg_data(struct rsxx_cardinfo *card,
+static int copy_from_creg_data(struct rsxx_cardinfo *card,
                                int cnt8,
                                void *buf,
                                unsigned int stream)
@@ -87,6 +92,9 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
        int i = 0;
        u32 *data = buf;
 
+       if (unlikely(card->eeh_state))
+               return -EIO;
+
        for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
                /*
                 * Firmware implementation makes it necessary to byte swap on
@@ -97,41 +105,31 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
                else
                        data[i] = ioread32(card->regmap + CREG_DATA(i));
        }
-}
-
-static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
-{
-       struct creg_cmd *cmd;
 
-       /*
-        * Spin lock is needed because this can be called in atomic/interrupt
-        * context.
-        */
-       spin_lock_bh(&card->creg_ctrl.lock);
-       cmd = card->creg_ctrl.active_cmd;
-       card->creg_ctrl.active_cmd = NULL;
-       spin_unlock_bh(&card->creg_ctrl.lock);
-
-       return cmd;
+       return 0;
 }
 
 static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
 {
+       int st;
+
+       if (unlikely(card->eeh_state))
+               return;
+
        iowrite32(cmd->addr, card->regmap + CREG_ADD);
        iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
 
        if (cmd->op == CREG_OP_WRITE) {
-               if (cmd->buf)
-                       copy_to_creg_data(card, cmd->cnt8,
-                                         cmd->buf, cmd->stream);
+               if (cmd->buf) {
+                       st = copy_to_creg_data(card, cmd->cnt8,
+                                              cmd->buf, cmd->stream);
+                       if (st)
+                               return;
+               }
        }
 
-       /*
-        * Data copy must complete before initiating the command. This is
-        * needed for weakly ordered processors (i.e. PowerPC), so that all
-        * neccessary registers are written before we kick the hardware.
-        */
-       wmb();
+       if (unlikely(card->eeh_state))
+               return;
 
        /* Setting the valid bit will kick off the command. */
        iowrite32(cmd->op, card->regmap + CREG_CMD);
@@ -196,11 +194,11 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
        cmd->cb_private = cb_private;
        cmd->status     = 0;
 
-       spin_lock(&card->creg_ctrl.lock);
+       spin_lock_bh(&card->creg_ctrl.lock);
        list_add_tail(&cmd->list, &card->creg_ctrl.queue);
        card->creg_ctrl.q_depth++;
        creg_kick_queue(card);
-       spin_unlock(&card->creg_ctrl.lock);
+       spin_unlock_bh(&card->creg_ctrl.lock);
 
        return 0;
 }
@@ -210,7 +208,11 @@ static void creg_cmd_timed_out(unsigned long data)
        struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
        struct creg_cmd *cmd;
 
-       cmd = pop_active_cmd(card);
+       spin_lock(&card->creg_ctrl.lock);
+       cmd = card->creg_ctrl.active_cmd;
+       card->creg_ctrl.active_cmd = NULL;
+       spin_unlock(&card->creg_ctrl.lock);
+
        if (cmd == NULL) {
                card->creg_ctrl.creg_stats.creg_timeout++;
                dev_warn(CARD_TO_DEV(card),
@@ -247,7 +249,11 @@ static void creg_cmd_done(struct work_struct *work)
        if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
                card->creg_ctrl.creg_stats.failed_cancel_timer++;
 
-       cmd = pop_active_cmd(card);
+       spin_lock_bh(&card->creg_ctrl.lock);
+       cmd = card->creg_ctrl.active_cmd;
+       card->creg_ctrl.active_cmd = NULL;
+       spin_unlock_bh(&card->creg_ctrl.lock);
+
        if (cmd == NULL) {
                dev_err(CARD_TO_DEV(card),
                        "Spurious creg interrupt!\n");
@@ -287,7 +293,7 @@ static void creg_cmd_done(struct work_struct *work)
                        goto creg_done;
                }
 
-               copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
+               st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
        }
 
 creg_done:
@@ -296,10 +302,10 @@ creg_done:
 
        kmem_cache_free(creg_cmd_pool, cmd);
 
-       spin_lock(&card->creg_ctrl.lock);
+       spin_lock_bh(&card->creg_ctrl.lock);
        card->creg_ctrl.active = 0;
        creg_kick_queue(card);
-       spin_unlock(&card->creg_ctrl.lock);
+       spin_unlock_bh(&card->creg_ctrl.lock);
 }
 
 static void creg_reset(struct rsxx_cardinfo *card)
@@ -324,7 +330,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
                "Resetting creg interface for recovery\n");
 
        /* Cancel outstanding commands */
-       spin_lock(&card->creg_ctrl.lock);
+       spin_lock_bh(&card->creg_ctrl.lock);
        list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
                list_del(&cmd->list);
                card->creg_ctrl.q_depth--;
@@ -345,7 +351,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
 
                card->creg_ctrl.active = 0;
        }
-       spin_unlock(&card->creg_ctrl.lock);
+       spin_unlock_bh(&card->creg_ctrl.lock);
 
        card->creg_ctrl.reset = 0;
        spin_lock_irqsave(&card->irq_lock, flags);
@@ -399,12 +405,12 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
                return st;
 
        /*
-        * This timeout is neccessary for unresponsive hardware. The additional
+        * This timeout is necessary for unresponsive hardware. The additional
         * 20 seconds is used to guarantee that each creg request has time to
         * complete.
         */
-       timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
-                               card->creg_ctrl.q_depth) + 20000);
+       timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
+                                  card->creg_ctrl.q_depth + 20000);
 
        /*
         * The creg interface is guaranteed to complete. It has a timeout
@@ -690,6 +696,32 @@ int rsxx_reg_access(struct rsxx_cardinfo *card,
        return 0;
 }
 
+void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
+{
+       struct creg_cmd *cmd = NULL;
+
+       cmd = card->creg_ctrl.active_cmd;
+       card->creg_ctrl.active_cmd = NULL;
+
+       if (cmd) {
+               del_timer_sync(&card->creg_ctrl.cmd_timer);
+
+               spin_lock_bh(&card->creg_ctrl.lock);
+               list_add(&cmd->list, &card->creg_ctrl.queue);
+               card->creg_ctrl.q_depth++;
+               card->creg_ctrl.active = 0;
+               spin_unlock_bh(&card->creg_ctrl.lock);
+       }
+}
+
+void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
+{
+       spin_lock_bh(&card->creg_ctrl.lock);
+       if (!list_empty(&card->creg_ctrl.queue))
+               creg_kick_queue(card);
+       spin_unlock_bh(&card->creg_ctrl.lock);
+}
+
 /*------------ Initialization & Setup --------------*/
 int rsxx_creg_setup(struct rsxx_cardinfo *card)
 {
@@ -712,7 +744,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
        int cnt = 0;
 
        /* Cancel outstanding commands */
-       spin_lock(&card->creg_ctrl.lock);
+       spin_lock_bh(&card->creg_ctrl.lock);
        list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
                list_del(&cmd->list);
                if (cmd->cb)
@@ -737,7 +769,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
                        "Canceled active creg command\n");
                kmem_cache_free(creg_cmd_pool, cmd);
        }
-       spin_unlock(&card->creg_ctrl.lock);
+       spin_unlock_bh(&card->creg_ctrl.lock);
 
        cancel_work_sync(&card->creg_ctrl.done_work);
 }
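
The hunks above convert every process-context acquisition of creg_ctrl.lock to the _bh variants so it cannot deadlock against creg_cmd_timed_out(), which runs from the command timer in softirq context. A minimal sketch of the pattern, using an illustrative queue structure rather than the driver's own types:

#include <linux/spinlock.h>
#include <linux/list.h>

struct cmd_queue {
	spinlock_t lock;		/* taken from process context and from a timer */
	struct list_head items;
	unsigned int depth;
};

/* Process-context producer: the _bh variants disable softirqs on the
 * local CPU, so the timer handler cannot interrupt us and spin on a
 * lock we already hold. */
static void queue_item(struct cmd_queue *q, struct list_head *item)
{
	spin_lock_bh(&q->lock);
	list_add_tail(item, &q->items);
	q->depth++;
	spin_unlock_bh(&q->lock);
}

The timer side keeps the plain spin_lock()/spin_unlock() calls, exactly as creg_cmd_timed_out() does above, because it already runs in softirq context.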
index 63176e67662f5f48d2b358c39d93d86735c21c72..0607513cfb41fa4b2ff88bac32be0665c645a2bb 100644 (file)
@@ -28,7 +28,7 @@
 struct rsxx_dma {
        struct list_head         list;
        u8                       cmd;
-       unsigned int             laddr;     /* Logical address on the ramsan */
+       unsigned int             laddr;     /* Logical address */
        struct {
                u32              off;
                u32              cnt;
@@ -81,9 +81,6 @@ enum rsxx_hw_status {
        HW_STATUS_FAULT         = 0x08,
 };
 
-#define STATUS_BUFFER_SIZE8     4096
-#define COMMAND_BUFFER_SIZE8    4096
-
 static struct kmem_cache *rsxx_dma_pool;
 
 struct dma_tracker {
@@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
        return tgt;
 }
 
-static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
+void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
 {
        /* Reset all DMA Command/Status Queues */
        iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
@@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
        u32 q_depth = 0;
        u32 intr_coal;
 
-       if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
+       if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
+           unlikely(card->eeh_state))
                return;
 
        for (i = 0; i < card->n_targets; i++)
@@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 }
 
 /*----------------- RSXX DMA Handling -------------------*/
-static void rsxx_complete_dma(struct rsxx_cardinfo *card,
+static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
                                  struct rsxx_dma *dma,
                                  unsigned int status)
 {
        if (status & DMA_SW_ERR)
-               printk_ratelimited(KERN_ERR
-                                  "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
-                                  dma->cmd, dma->laddr);
+               ctrl->stats.dma_sw_err++;
        if (status & DMA_HW_FAULT)
-               printk_ratelimited(KERN_ERR
-                                  "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
-                                  dma->cmd, dma->laddr);
+               ctrl->stats.dma_hw_fault++;
        if (status & DMA_CANCELLED)
-               printk_ratelimited(KERN_ERR
-                                  "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
-                                  dma->cmd, dma->laddr);
+               ctrl->stats.dma_cancelled++;
 
        if (dma->dma_addr)
-               pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
+               pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+                              get_dma_size(dma),
                               dma->cmd == HW_CMD_BLK_WRITE ?
                                           PCI_DMA_TODEVICE :
                                           PCI_DMA_FROMDEVICE);
 
        if (dma->cb)
-               dma->cb(card, dma->cb_data, status ? 1 : 0);
+               dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
 
        kmem_cache_free(rsxx_dma_pool, dma);
 }
@@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
        if (requeue_cmd)
                rsxx_requeue_dma(ctrl, dma);
        else
-               rsxx_complete_dma(ctrl->card, dma, status);
+               rsxx_complete_dma(ctrl, dma, status);
 }
 
 static void dma_engine_stalled(unsigned long data)
 {
        struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
 
-       if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+       if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
+           unlikely(ctrl->card->eeh_state))
                return;
 
        if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
@@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
        ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
        hw_cmd_buf = ctrl->cmd.buf;
 
-       if (unlikely(ctrl->card->halt))
+       if (unlikely(ctrl->card->halt) ||
+           unlikely(ctrl->card->eeh_state))
                return;
 
        while (1) {
@@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work)
                 */
                if (unlikely(ctrl->card->dma_fault)) {
                        push_tracker(ctrl->trackers, tag);
-                       rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
+                       rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                        continue;
                }
 
@@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work)
 
        /* Let HW know we've queued commands. */
        if (cmds_pending) {
-               /*
-                * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
-                * (which is in PCI-consistent system-memory) from the loop
-                * above make it into the coherency domain before the
-                * following PIO "trigger" updating the cmd.idx.  A WMB is
-                * sufficient. We need not explicitly CPU cache-flush since
-                * the memory is a PCI-consistent (ie; coherent) mapping.
-                */
-               wmb();
-
                atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);
+
+               if (unlikely(ctrl->card->eeh_state)) {
+                       del_timer_sync(&ctrl->activity_timer);
+                       return;
+               }
+
                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
        }
 }
@@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work)
        hw_st_buf = ctrl->status.buf;
 
        if (unlikely(ctrl->card->halt) ||
-           unlikely(ctrl->card->dma_fault))
+           unlikely(ctrl->card->dma_fault) ||
+           unlikely(ctrl->card->eeh_state))
                return;
 
        count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
@@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work)
                if (status)
                        rsxx_handle_dma_error(ctrl, dma, status);
                else
-                       rsxx_complete_dma(ctrl->card, dma, 0);
+                       rsxx_complete_dma(ctrl, dma, 0);
 
                push_tracker(ctrl->trackers, tag);
 
@@ -727,20 +719,54 @@ bvec_err:
 
 
 /*----------------- DMA Engine Initialization & Setup -------------------*/
+int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
+{
+       ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
+                               &ctrl->status.dma_addr);
+       ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
+                               &ctrl->cmd.dma_addr);
+       if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
+               return -ENOMEM;
+
+       memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
+       iowrite32(lower_32_bits(ctrl->status.dma_addr),
+               ctrl->regmap + SB_ADD_LO);
+       iowrite32(upper_32_bits(ctrl->status.dma_addr),
+               ctrl->regmap + SB_ADD_HI);
+
+       memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
+       iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
+       iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
+
+       ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
+       if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+               dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
+                       ctrl->status.idx);
+               return -EINVAL;
+       }
+       iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
+       iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
+
+       ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
+       if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+               dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
+                       ctrl->cmd.idx);
+               return -EINVAL;
+       }
+       iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
+       iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+
+       return 0;
+}
+
 static int rsxx_dma_ctrl_init(struct pci_dev *dev,
                                  struct rsxx_dma_ctrl *ctrl)
 {
        int i;
+       int st;
 
        memset(&ctrl->stats, 0, sizeof(ctrl->stats));
 
-       ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
-                                               &ctrl->status.dma_addr);
-       ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
-                                            &ctrl->cmd.dma_addr);
-       if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
-               return -ENOMEM;
-
        ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
        if (!ctrl->trackers)
                return -ENOMEM;
@@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
        INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
        INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
 
-       memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
-       iowrite32(lower_32_bits(ctrl->status.dma_addr),
-                 ctrl->regmap + SB_ADD_LO);
-       iowrite32(upper_32_bits(ctrl->status.dma_addr),
-                 ctrl->regmap + SB_ADD_HI);
-
-       memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
-       iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
-       iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
-
-       ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
-       if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-               dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
-                        ctrl->status.idx);
-               return -EINVAL;
-       }
-       iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
-       iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
-
-       ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
-       if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-               dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
-                        ctrl->status.idx);
-               return -EINVAL;
-       }
-       iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
-       iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
-
-       wmb();
+       st = rsxx_hw_buffers_init(dev, ctrl);
+       if (st)
+               return st;
 
        return 0;
 }
@@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
        return 0;
 }
 
-static int rsxx_dma_configure(struct rsxx_cardinfo *card)
+int rsxx_dma_configure(struct rsxx_cardinfo *card)
 {
        u32 intr_coal;
 
@@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
        }
 }
 
+int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
+{
+       int i;
+       int j;
+       int cnt;
+       struct rsxx_dma *dma;
+       struct list_head *issued_dmas;
+
+       issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
+                             GFP_KERNEL);
+       if (!issued_dmas)
+               return -ENOMEM;
+
+       for (i = 0; i < card->n_targets; i++) {
+               INIT_LIST_HEAD(&issued_dmas[i]);
+               cnt = 0;
+               for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
+                       dma = get_tracker_dma(card->ctrl[i].trackers, j);
+                       if (dma == NULL)
+                               continue;
+
+                       if (dma->cmd == HW_CMD_BLK_WRITE)
+                               card->ctrl[i].stats.writes_issued--;
+                       else if (dma->cmd == HW_CMD_BLK_DISCARD)
+                               card->ctrl[i].stats.discards_issued--;
+                       else
+                               card->ctrl[i].stats.reads_issued--;
+
+                       list_add_tail(&dma->list, &issued_dmas[i]);
+                       push_tracker(card->ctrl[i].trackers, j);
+                       cnt++;
+               }
+
+               spin_lock(&card->ctrl[i].queue_lock);
+               list_splice(&issued_dmas[i], &card->ctrl[i].queue);
+
+               atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
+               card->ctrl[i].stats.sw_q_depth += cnt;
+               card->ctrl[i].e_cnt = 0;
+
+               list_for_each_entry(dma, &card->ctrl[i].queue, list) {
+                       if (dma->dma_addr)
+                               pci_unmap_page(card->dev, dma->dma_addr,
+                                              get_dma_size(dma),
+                                              dma->cmd == HW_CMD_BLK_WRITE ?
+                                              PCI_DMA_TODEVICE :
+                                              PCI_DMA_FROMDEVICE);
+               }
+               spin_unlock(&card->ctrl[i].queue_lock);
+       }
+
+       kfree(issued_dmas);
+
+       return 0;
+}
+
+void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
+{
+       struct rsxx_dma *dma;
+       struct rsxx_dma *tmp;
+       int i;
+
+       for (i = 0; i < card->n_targets; i++) {
+               spin_lock(&card->ctrl[i].queue_lock);
+               list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
+                       list_del(&dma->list);
+
+                       rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
+               }
+               spin_unlock(&card->ctrl[i].queue_lock);
+       }
+}
+
+int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
+{
+       struct rsxx_dma *dma;
+       int i;
+
+       for (i = 0; i < card->n_targets; i++) {
+               spin_lock(&card->ctrl[i].queue_lock);
+               list_for_each_entry(dma, &card->ctrl[i].queue, list) {
+                       dma->dma_addr = pci_map_page(card->dev, dma->page,
+                                       dma->pg_off, get_dma_size(dma),
+                                       dma->cmd == HW_CMD_BLK_WRITE ?
+                                       PCI_DMA_TODEVICE :
+                                       PCI_DMA_FROMDEVICE);
+                       if (!dma->dma_addr) {
+                               spin_unlock(&card->ctrl[i].queue_lock);
+                               kmem_cache_free(rsxx_dma_pool, dma);
+                               return -ENOMEM;
+                       }
+               }
+               spin_unlock(&card->ctrl[i].queue_lock);
+       }
+
+       return 0;
+}
 
 int rsxx_dma_init(void)
 {
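
The new rsxx_eeh_* helpers split EEH handling into a save phase (pull in-flight DMAs off the hardware trackers, unmap them and park them on the software queues) and a recovery phase (remap and re-issue, or cancel everything back to the block layer). A hedged sketch of how a PCI error handler might call them; the callback skeleton and return values are assumptions, only the rsxx_* names come from this patch:

/* Sketch only: wiring the helpers above into pci_error_handlers callbacks. */
static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
					    enum pci_channel_state state)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);

	card->eeh_state = 1;
	rsxx_eeh_save_issued_dmas(card);	/* park in-flight DMAs on the SW queues */
	rsxx_eeh_save_issued_creg(card);	/* and the active creg command */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);

	if (rsxx_eeh_remap_dmas(card))		/* remap pages for re-issue... */
		rsxx_eeh_cancel_dmas(card);	/* ...or fail them back to the block layer */

	card->eeh_state = 0;
	rsxx_kick_creg_queue(card);
	return PCI_ERS_RESULT_RECOVERED;
}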
index 2e50b65902b71e564f9a25cba8a22cc09d893805..24ba3642bd89aeaabc113407eb2de6d92f13602a 100644 (file)
 
 /*----------------- IOCTL Definitions -------------------*/
 
+#define RSXX_MAX_DATA 8
+
 struct rsxx_reg_access {
        __u32 addr;
        __u32 cnt;
        __u32 stat;
        __u32 stream;
-       __u32 data[8];
+       __u32 data[RSXX_MAX_DATA];
 };
 
-#define RSXX_MAX_REG_CNT       (8 * (sizeof(__u32)))
+#define RSXX_MAX_REG_CNT       (RSXX_MAX_DATA * (sizeof(__u32)))
 
 #define RSXX_IOC_MAGIC 'r'
 
index c025fe5fdb703af4d1a94ed491814cded627a17d..f384c943846de35a2dab6d5f173df7b15c4bd5f2 100644 (file)
@@ -58,7 +58,7 @@ struct rsxx_card_cfg {
 };
 
 /* Vendor ID Values */
-#define RSXX_VENDOR_ID_TMS_IBM         0
+#define RSXX_VENDOR_ID_IBM             0
 #define RSXX_VENDOR_ID_DSI             1
 #define RSXX_VENDOR_COUNT              2
 
index a1ac907d8f4c3ee60aabb11bdfb439ea3418183c..382e8bf5c03b73dae35fd1ae8865110cc1a85168 100644 (file)
 
 struct proc_cmd;
 
-#define PCI_VENDOR_ID_TMS_IBM          0x15B6
-#define PCI_DEVICE_ID_RS70_FLASH       0x0019
-#define PCI_DEVICE_ID_RS70D_FLASH      0x001A
-#define PCI_DEVICE_ID_RS80_FLASH       0x001C
-#define PCI_DEVICE_ID_RS81_FLASH       0x001E
+#define PCI_DEVICE_ID_FS70_FLASH       0x04A9
+#define PCI_DEVICE_ID_FS80_FLASH       0x04AA
 
 #define RS70_PCI_REV_SUPPORTED 4
 
 #define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "3.7"
+#define DRIVER_VERSION "4.0"
 
 /* Block size is 4096 */
 #define RSXX_HW_BLK_SHIFT              12
@@ -67,6 +64,9 @@ struct proc_cmd;
 #define RSXX_MAX_OUTSTANDING_CMDS      255
 #define RSXX_CS_IDX_MASK               0xff
 
+#define STATUS_BUFFER_SIZE8     4096
+#define COMMAND_BUFFER_SIZE8    4096
+
 #define RSXX_MAX_TARGETS       8
 
 struct dma_tracker_list;
@@ -91,6 +91,9 @@ struct rsxx_dma_stats {
        u32 discards_failed;
        u32 done_rescheduled;
        u32 issue_rescheduled;
+       u32 dma_sw_err;
+       u32 dma_hw_fault;
+       u32 dma_cancelled;
        u32 sw_q_depth;         /* Number of DMAs on the SW queue. */
        atomic_t hw_q_depth;    /* Number of DMAs queued to HW. */
 };
@@ -116,6 +119,7 @@ struct rsxx_dma_ctrl {
 struct rsxx_cardinfo {
        struct pci_dev          *dev;
        unsigned int            halt;
+       unsigned int            eeh_state;
 
        void                    __iomem *regmap;
        spinlock_t              irq_lock;
@@ -224,6 +228,7 @@ enum rsxx_pci_regmap {
        PERF_RD512_HI   = 0xac,
        PERF_WR512_LO   = 0xb0,
        PERF_WR512_HI   = 0xb4,
+       PCI_RECONFIG    = 0xb8,
 };
 
 enum rsxx_intr {
@@ -237,6 +242,8 @@ enum rsxx_intr {
        CR_INTR_DMA5    = 0x00000080,
        CR_INTR_DMA6    = 0x00000100,
        CR_INTR_DMA7    = 0x00000200,
+       CR_INTR_ALL_C   = 0x0000003f,
+       CR_INTR_ALL_G   = 0x000003ff,
        CR_INTR_DMA_ALL = 0x000003f5,
        CR_INTR_ALL     = 0xffffffff,
 };
@@ -253,8 +260,14 @@ enum rsxx_pci_reset {
        DMA_QUEUE_RESET         = 0x00000001,
 };
 
+enum rsxx_hw_fifo_flush {
+       RSXX_FLUSH_BUSY         = 0x00000002,
+       RSXX_FLUSH_TIMEOUT      = 0x00000004,
+};
+
 enum rsxx_pci_revision {
        RSXX_DISCARD_SUPPORT = 2,
+       RSXX_EEH_SUPPORT     = 3,
 };
 
 enum rsxx_creg_cmd {
@@ -360,11 +373,17 @@ int rsxx_dma_setup(struct rsxx_cardinfo *card);
 void rsxx_dma_destroy(struct rsxx_cardinfo *card);
 int rsxx_dma_init(void);
 void rsxx_dma_cleanup(void);
+void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
+int rsxx_dma_configure(struct rsxx_cardinfo *card);
 int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           struct bio *bio,
                           atomic_t *n_dmas,
                           rsxx_dma_cb cb,
                           void *cb_data);
+int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
+int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
+void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
+int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
 
 /***** cregs.c *****/
 int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
@@ -389,10 +408,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card);
 void rsxx_creg_destroy(struct rsxx_cardinfo *card);
 int rsxx_creg_init(void);
 void rsxx_creg_cleanup(void);
-
 int rsxx_reg_access(struct rsxx_cardinfo *card,
                        struct rsxx_reg_access __user *ucmd,
                        int read);
+void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card);
+void rsxx_kick_creg_queue(struct rsxx_cardinfo *card);
 
 
 
index de1f319f7bd7e0118a960b5bd23fd286a6ba343e..dd5b2fed97e9ae2b7665f13ec3c42db6ee6fb462 100644 (file)
@@ -164,7 +164,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
 
 #define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
-            (n) = rb_next(&(pos)->node); \
+            (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
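
The extra NULL test in the initializer covers the empty-tree case: rb_first() returns NULL there, and without the guard the very first rb_next(&(pos)->node) would dereference that NULL before the loop condition was ever evaluated. Callers are unchanged; a sketch of typical usage, with an illustrative cleanup body:

struct persistent_gnt *pos;
struct rb_node *n;
struct rb_root *root = &blkif->persistent_gnts;	/* the backend's grant tree */

/* Safe now even when the tree is empty. */
foreach_grant_safe(pos, n, root, node) {
	rb_erase(&pos->node, root);
	/* ... end foreign access and free the page here ... */
	kfree(pos);
}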
@@ -381,8 +381,8 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 
 static void print_stats(struct xen_blkif *blkif)
 {
-       pr_info("xen-blkback (%s): oo %3d  |  rd %4d  |  wr %4d  |  f %4d"
-                "  |  ds %4d\n",
+       pr_info("xen-blkback (%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
+                "  |  ds %4llu\n",
                 current->comm, blkif->st_oo_req,
                 blkif->st_rd_req, blkif->st_wr_req,
                 blkif->st_f_req, blkif->st_ds_req);
@@ -442,7 +442,7 @@ int xen_blkif_schedule(void *arg)
 }
 
 struct seg_buf {
-       unsigned long buf;
+       unsigned int offset;
        unsigned int nsec;
 };
 /*
@@ -621,30 +621,21 @@ static int xen_blkbk_map(struct blkif_request *req,
                                 * If this is a new persistent grant
                                 * save the handler
                                 */
-                               persistent_gnts[i]->handle = map[j].handle;
-                               persistent_gnts[i]->dev_bus_addr =
-                                       map[j++].dev_bus_addr;
+                               persistent_gnts[i]->handle = map[j++].handle;
                        }
                        pending_handle(pending_req, i) =
                                persistent_gnts[i]->handle;
 
                        if (ret)
                                continue;
-
-                       seg[i].buf = persistent_gnts[i]->dev_bus_addr |
-                               (req->u.rw.seg[i].first_sect << 9);
                } else {
-                       pending_handle(pending_req, i) = map[j].handle;
+                       pending_handle(pending_req, i) = map[j++].handle;
                        bitmap_set(pending_req->unmap_seg, i, 1);
 
-                       if (ret) {
-                               j++;
+                       if (ret)
                                continue;
-                       }
-
-                       seg[i].buf = map[j++].dev_bus_addr |
-                               (req->u.rw.seg[i].first_sect << 9);
                }
+               seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
        }
        return ret;
 }
@@ -679,6 +670,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
        return err;
 }
 
+static int dispatch_other_io(struct xen_blkif *blkif,
+                            struct blkif_request *req,
+                            struct pending_req *pending_req)
+{
+       free_req(pending_req);
+       make_response(blkif, req->u.other.id, req->operation,
+                     BLKIF_RSP_EOPNOTSUPP);
+       return -EIO;
+}
+
 static void xen_blk_drain_io(struct xen_blkif *blkif)
 {
        atomic_set(&blkif->drain, 1);
@@ -800,17 +801,30 @@ __do_block_io_op(struct xen_blkif *blkif)
 
                /* Apply all sanity checks to /private copy/ of request. */
                barrier();
-               if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
+
+               switch (req.operation) {
+               case BLKIF_OP_READ:
+               case BLKIF_OP_WRITE:
+               case BLKIF_OP_WRITE_BARRIER:
+               case BLKIF_OP_FLUSH_DISKCACHE:
+                       if (dispatch_rw_block_io(blkif, &req, pending_req))
+                               goto done;
+                       break;
+               case BLKIF_OP_DISCARD:
                        free_req(pending_req);
                        if (dispatch_discard_io(blkif, &req))
-                               break;
-               } else if (dispatch_rw_block_io(blkif, &req, pending_req))
+                               goto done;
                        break;
+               default:
+                       if (dispatch_other_io(blkif, &req, pending_req))
+                               goto done;
+                       break;
+               }
 
                /* Yield point for this unbounded loop. */
                cond_resched();
        }
-
+done:
        return more_to_do;
 }
 
@@ -904,7 +918,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == READ ? "read" : "write",
                         preq.sector_number,
-                        preq.sector_number + preq.nr_sects, preq.dev);
+                        preq.sector_number + preq.nr_sects,
+                        blkif->vbd.pdevice);
                goto fail_response;
        }
 
@@ -947,7 +962,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                       (bio_add_page(bio,
                                     pages[i],
                                     seg[i].nsec << 9,
-                                    seg[i].buf & ~PAGE_MASK) == 0)) {
+                                    seg[i].offset) == 0)) {
 
                        bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
@@ -977,13 +992,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                bio->bi_end_io  = end_block_io_op;
        }
 
-       /*
-        * We set it one so that the last submit_bio does not have to call
-        * atomic_inc.
-        */
        atomic_set(&pending_req->pendcnt, nbio);
-
-       /* Get a reference count for the disk queue and start sending I/O */
        blk_start_plug(&plug);
 
        for (i = 0; i < nbio; i++)
@@ -1011,6 +1020,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
  fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
+       atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return -EIO;
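
Because the data pages now stay persistently mapped, the backend no longer needs a bus address per segment; struct seg_buf shrinks to an in-page byte offset plus a sector count. A sketch of how the two fields are derived from the ring request (fill_seg is a hypothetical helper, the driver open-codes this):

struct seg_buf {
	unsigned int offset;	/* byte offset into the granted page */
	unsigned int nsec;	/* length in 512-byte sectors */
};

static inline void fill_seg(struct seg_buf *seg,
			    const struct blkif_request_segment *rs)
{
	seg->offset = rs->first_sect << 9;		/* sectors -> bytes */
	seg->nsec = rs->last_sect - rs->first_sect + 1;	/* inclusive range */
}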
index 6072390c7f57ef724bee4476502eb78d63941c7c..60103e2517ba28ac5c946f32e1bd9e0e6eb7f668 100644 (file)
@@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard {
        uint64_t       nr_sectors;
 } __attribute__((__packed__));
 
+struct blkif_x86_32_request_other {
+       uint8_t        _pad1;
+       blkif_vdev_t   _pad2;
+       uint64_t       id;           /* private guest value, echoed in resp  */
+} __attribute__((__packed__));
+
 struct blkif_x86_32_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_32_request_rw rw;
                struct blkif_x86_32_request_discard discard;
+               struct blkif_x86_32_request_other other;
        } u;
 } __attribute__((__packed__));
 
@@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard {
        uint64_t       nr_sectors;
 } __attribute__((__packed__));
 
+struct blkif_x86_64_request_other {
+       uint8_t        _pad1;
+       blkif_vdev_t   _pad2;
+       uint32_t       _pad3;        /* offsetof(blkif_..,u.discard.id)==8   */
+       uint64_t       id;           /* private guest value, echoed in resp  */
+} __attribute__((__packed__));
+
 struct blkif_x86_64_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_64_request_rw rw;
                struct blkif_x86_64_request_discard discard;
+               struct blkif_x86_64_request_other other;
        } u;
 } __attribute__((__packed__));
 
@@ -172,7 +187,6 @@ struct persistent_gnt {
        struct page *page;
        grant_ref_t gnt;
        grant_handle_t handle;
-       uint64_t dev_bus_addr;
        struct rb_node node;
 };
 
@@ -208,13 +222,13 @@ struct xen_blkif {
 
        /* statistics */
        unsigned long           st_print;
-       int                     st_rd_req;
-       int                     st_wr_req;
-       int                     st_oo_req;
-       int                     st_f_req;
-       int                     st_ds_req;
-       int                     st_rd_sect;
-       int                     st_wr_sect;
+       unsigned long long                      st_rd_req;
+       unsigned long long                      st_wr_req;
+       unsigned long long                      st_oo_req;
+       unsigned long long                      st_f_req;
+       unsigned long long                      st_ds_req;
+       unsigned long long                      st_rd_sect;
+       unsigned long long                      st_wr_sect;
 
        wait_queue_head_t       waiting_to_free;
 };
@@ -278,6 +292,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        default:
+               /*
+                * Don't know how to translate this op. Only get the
+                * ID so failure can be reported to the frontend.
+                */
+               dst->u.other.id = src->u.other.id;
                break;
        }
 }
@@ -309,6 +328,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        default:
+               /*
+                * Don't know how to translate this op. Only get the
+                * ID so failure can be reported to the frontend.
+                */
+               dst->u.other.id = src->u.other.id;
                break;
        }
 }
index 5e237f630c47f2b2299749e067744dc89adba1a8..8bfd1bcf95ec0c168f565ac769479cb3be6098d3 100644 (file)
@@ -230,13 +230,13 @@ int __init xen_blkif_interface_init(void)
        }                                                               \
        static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
 
-VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
-VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
-VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
-VBD_SHOW(f_req,  "%d\n", be->blkif->st_f_req);
-VBD_SHOW(ds_req,  "%d\n", be->blkif->st_ds_req);
-VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
-VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
+VBD_SHOW(oo_req,  "%llu\n", be->blkif->st_oo_req);
+VBD_SHOW(rd_req,  "%llu\n", be->blkif->st_rd_req);
+VBD_SHOW(wr_req,  "%llu\n", be->blkif->st_wr_req);
+VBD_SHOW(f_req,  "%llu\n", be->blkif->st_f_req);
+VBD_SHOW(ds_req,  "%llu\n", be->blkif->st_ds_req);
+VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect);
+VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect);
 
 static struct attribute *xen_vbdstat_attrs[] = {
        &dev_attr_oo_req.attr,
index c3dae2e0f290e8ad64b4f3e6c869c2fca14cd1a1..a894f88762d8d3a1f72315e805f9494cdad8294d 100644 (file)
@@ -44,7 +44,7 @@
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/bitmap.h>
-#include <linux/llist.h>
+#include <linux/list.h>
 
 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -68,13 +68,12 @@ enum blkif_state {
 struct grant {
        grant_ref_t gref;
        unsigned long pfn;
-       struct llist_node node;
+       struct list_head node;
 };
 
 struct blk_shadow {
        struct blkif_request req;
        struct request *request;
-       unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
@@ -105,7 +104,7 @@ struct blkfront_info
        struct work_struct work;
        struct gnttab_free_callback callback;
        struct blk_shadow shadow[BLK_RING_SIZE];
-       struct llist_head persistent_gnts;
+       struct list_head persistent_gnts;
        unsigned int persistent_gnts_c;
        unsigned long shadow_free;
        unsigned int feature_flush;
@@ -165,6 +164,69 @@ static int add_id_to_freelist(struct blkfront_info *info,
        return 0;
 }
 
+static int fill_grant_buffer(struct blkfront_info *info, int num)
+{
+       struct page *granted_page;
+       struct grant *gnt_list_entry, *n;
+       int i = 0;
+
+       while (i < num) {
+               gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
+               if (!gnt_list_entry)
+                       goto out_of_memory;
+
+               granted_page = alloc_page(GFP_NOIO);
+               if (!granted_page) {
+                       kfree(gnt_list_entry);
+                       goto out_of_memory;
+               }
+
+               gnt_list_entry->pfn = page_to_pfn(granted_page);
+               gnt_list_entry->gref = GRANT_INVALID_REF;
+               list_add(&gnt_list_entry->node, &info->persistent_gnts);
+               i++;
+       }
+
+       return 0;
+
+out_of_memory:
+       list_for_each_entry_safe(gnt_list_entry, n,
+                                &info->persistent_gnts, node) {
+               list_del(&gnt_list_entry->node);
+               __free_page(pfn_to_page(gnt_list_entry->pfn));
+               kfree(gnt_list_entry);
+               i--;
+       }
+       BUG_ON(i != 0);
+       return -ENOMEM;
+}
+
+static struct grant *get_grant(grant_ref_t *gref_head,
+                               struct blkfront_info *info)
+{
+       struct grant *gnt_list_entry;
+       unsigned long buffer_mfn;
+
+       BUG_ON(list_empty(&info->persistent_gnts));
+       gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
+                                         node);
+       list_del(&gnt_list_entry->node);
+
+       if (gnt_list_entry->gref != GRANT_INVALID_REF) {
+               info->persistent_gnts_c--;
+               return gnt_list_entry;
+       }
+
+       /* Assign a gref to this page */
+       gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
+       BUG_ON(gnt_list_entry->gref == -ENOSPC);
+       buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+       gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
+                                       info->xbdev->otherend_id,
+                                       buffer_mfn, 0);
+       return gnt_list_entry;
+}
+
 static const char *op_name(int op)
 {
        static const char *const names[] = {
@@ -293,7 +355,6 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 static int blkif_queue_request(struct request *req)
 {
        struct blkfront_info *info = req->rq_disk->private_data;
-       unsigned long buffer_mfn;
        struct blkif_request *ring_req;
        unsigned long id;
        unsigned int fsect, lsect;
@@ -306,7 +367,6 @@ static int blkif_queue_request(struct request *req)
         */
        bool new_persistent_gnts;
        grant_ref_t gref_head;
-       struct page *granted_page;
        struct grant *gnt_list_entry = NULL;
        struct scatterlist *sg;
 
@@ -370,41 +430,8 @@ static int blkif_queue_request(struct request *req)
                        fsect = sg->offset >> 9;
                        lsect = fsect + (sg->length >> 9) - 1;
 
-                       if (info->persistent_gnts_c) {
-                               BUG_ON(llist_empty(&info->persistent_gnts));
-                               gnt_list_entry = llist_entry(
-                                       llist_del_first(&info->persistent_gnts),
-                                       struct grant, node);
-
-                               ref = gnt_list_entry->gref;
-                               buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
-                               info->persistent_gnts_c--;
-                       } else {
-                               ref = gnttab_claim_grant_reference(&gref_head);
-                               BUG_ON(ref == -ENOSPC);
-
-                               gnt_list_entry =
-                                       kmalloc(sizeof(struct grant),
-                                                        GFP_ATOMIC);
-                               if (!gnt_list_entry)
-                                       return -ENOMEM;
-
-                               granted_page = alloc_page(GFP_ATOMIC);
-                               if (!granted_page) {
-                                       kfree(gnt_list_entry);
-                                       return -ENOMEM;
-                               }
-
-                               gnt_list_entry->pfn =
-                                       page_to_pfn(granted_page);
-                               gnt_list_entry->gref = ref;
-
-                               buffer_mfn = pfn_to_mfn(page_to_pfn(
-                                                               granted_page));
-                               gnttab_grant_foreign_access_ref(ref,
-                                       info->xbdev->otherend_id,
-                                       buffer_mfn, 0);
-                       }
+                       gnt_list_entry = get_grant(&gref_head, info);
+                       ref = gnt_list_entry->gref;
 
                        info->shadow[id].grants_used[i] = gnt_list_entry;
 
@@ -435,7 +462,6 @@ static int blkif_queue_request(struct request *req)
                                kunmap_atomic(shared_data);
                        }
 
-                       info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
                        ring_req->u.rw.seg[i] =
                                        (struct blkif_request_segment) {
                                                .gref       = ref,
@@ -790,9 +816,8 @@ static void blkif_restart_queue(struct work_struct *work)
 
 static void blkif_free(struct blkfront_info *info, int suspend)
 {
-       struct llist_node *all_gnts;
-       struct grant *persistent_gnt, *tmp;
-       struct llist_node *n;
+       struct grant *persistent_gnt;
+       struct grant *n;
 
        /* Prevent new requests being issued until we fix things up. */
        spin_lock_irq(&info->io_lock);
@@ -803,22 +828,20 @@ static void blkif_free(struct blkfront_info *info, int suspend)
                blk_stop_queue(info->rq);
 
        /* Remove all persistent grants */
-       if (info->persistent_gnts_c) {
-               all_gnts = llist_del_all(&info->persistent_gnts);
-               persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node);
-               while (persistent_gnt) {
-                       gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
+       if (!list_empty(&info->persistent_gnts)) {
+               list_for_each_entry_safe(persistent_gnt, n,
+                                        &info->persistent_gnts, node) {
+                       list_del(&persistent_gnt->node);
+                       if (persistent_gnt->gref != GRANT_INVALID_REF) {
+                               gnttab_end_foreign_access(persistent_gnt->gref,
+                                                         0, 0UL);
+                               info->persistent_gnts_c--;
+                       }
                        __free_page(pfn_to_page(persistent_gnt->pfn));
-                       tmp = persistent_gnt;
-                       n = persistent_gnt->node.next;
-                       if (n)
-                               persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
-                       else
-                               persistent_gnt = NULL;
-                       kfree(tmp);
+                       kfree(persistent_gnt);
                }
-               info->persistent_gnts_c = 0;
        }
+       BUG_ON(info->persistent_gnts_c != 0);
 
        /* No more gnttab callback work. */
        gnttab_cancel_free_callback(&info->callback);
@@ -875,7 +898,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
        }
        /* Add the persistent grant into the list of free grants */
        for (i = 0; i < s->req.u.rw.nr_segments; i++) {
-               llist_add(&s->grants_used[i]->node, &info->persistent_gnts);
+               list_add(&s->grants_used[i]->node, &info->persistent_gnts);
                info->persistent_gnts_c++;
        }
 }
@@ -1013,6 +1036,12 @@ static int setup_blkring(struct xenbus_device *dev,
 
        sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
+       /* Allocate memory for grants */
+       err = fill_grant_buffer(info, BLK_RING_SIZE *
+                                     BLKIF_MAX_SEGMENTS_PER_REQUEST);
+       if (err)
+               goto fail;
+
        err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
        if (err < 0) {
                free_page((unsigned long)sring);
@@ -1171,7 +1200,7 @@ static int blkfront_probe(struct xenbus_device *dev,
        spin_lock_init(&info->io_lock);
        info->xbdev = dev;
        info->vdevice = vdevice;
-       init_llist_head(&info->persistent_gnts);
+       INIT_LIST_HEAD(&info->persistent_gnts);
        info->persistent_gnts_c = 0;
        info->connected = BLKIF_STATE_DISCONNECTED;
        INIT_WORK(&info->work, blkif_restart_queue);
@@ -1203,11 +1232,10 @@ static int blkif_recover(struct blkfront_info *info)
        int j;
 
        /* Stage 1: Make a safe copy of the shadow state. */
-       copy = kmalloc(sizeof(info->shadow),
+       copy = kmemdup(info->shadow, sizeof(info->shadow),
                       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
        if (!copy)
                return -ENOMEM;
-       memcpy(copy, info->shadow, sizeof(info->shadow));
 
        /* Stage 2: Set up free list. */
        memset(&info->shadow, 0, sizeof(info->shadow));
@@ -1236,7 +1264,7 @@ static int blkif_recover(struct blkfront_info *info)
                                gnttab_grant_foreign_access_ref(
                                        req->u.rw.seg[j].gref,
                                        info->xbdev->otherend_id,
-                                       pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
+                                       pfn_to_mfn(copy[i].grants_used[j]->pfn),
                                        0);
                }
                info->shadow[req->u.rw.id].req = *req;
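
fill_grant_buffer() and get_grant() above move all page and metadata allocation for persistent grants out of the I/O path: the pool is filled once with GFP_NOIO when the ring is set up, and blkif_queue_request() only ever takes an entry off a plain list. A generic sketch of that allocate-early, consume-later pattern (the types and names here are illustrative, not blkfront's):

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/slab.h>

struct pool_entry {
	struct list_head node;
	struct page *page;
};

/* Setup time: may sleep, so failures are handled gracefully. */
static int pool_fill(struct list_head *pool, int num)
{
	while (num--) {
		struct pool_entry *e = kzalloc(sizeof(*e), GFP_NOIO);

		if (!e)
			return -ENOMEM;		/* caller unwinds the partial pool */
		e->page = alloc_page(GFP_NOIO);
		if (!e->page) {
			kfree(e);
			return -ENOMEM;
		}
		list_add(&e->node, pool);
	}
	return 0;
}

/* Hot path: never allocates, just pops the head entry. */
static struct pool_entry *pool_take(struct list_head *pool)
{
	struct pool_entry *e = list_first_entry(pool, struct pool_entry, node);

	list_del(&e->node);
	return e;
}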
index b282af181b44dc974daab7ef4941e9373b833506..6aab00ef4379a5b9e4dfeb290461d3c177726f07 100644 (file)
@@ -73,9 +73,11 @@ static struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x03F0, 0x311D) },
 
        /* Atheros AR3012 with sflash firmware*/
+       { USB_DEVICE(0x0CF3, 0x0036) },
        { USB_DEVICE(0x0CF3, 0x3004) },
        { USB_DEVICE(0x0CF3, 0x3008) },
        { USB_DEVICE(0x0CF3, 0x311D) },
+       { USB_DEVICE(0x0CF3, 0x817a) },
        { USB_DEVICE(0x13d3, 0x3375) },
        { USB_DEVICE(0x04CA, 0x3004) },
        { USB_DEVICE(0x04CA, 0x3005) },
@@ -107,9 +109,11 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
 static struct usb_device_id ath3k_blist_tbl[] = {
 
        /* Atheros AR3012 with sflash firmware*/
+       { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
index e547851870e74811d71aa7511d5be022a0a862ab..2cc5f774a29c47140739361420564b3f7c15b095 100644 (file)
@@ -131,9 +131,11 @@ static struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
 
        /* Atheros 3012 with sflash firmware */
+       { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
index 69ae5972713cf814e968c3c9d9ad8743d3dd240d..a0f7724852eb84d2ccb20eebc8e4309ea48a085c 100644 (file)
@@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng)
 }
 EXPORT_SYMBOL_GPL(hwrng_unregister);
 
+static void __exit hwrng_exit(void)
+{
+       mutex_lock(&rng_mutex);
+       BUG_ON(current_rng);
+       kfree(rng_buffer);
+       mutex_unlock(&rng_mutex);
+}
+
+module_exit(hwrng_exit);
 
 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
 MODULE_LICENSE("GPL");
index e905d5f5305180cd7cc77a8690e51ae53bed3fec..ce5f3fc25d6dbf1f7c45ff4cad6d2349da202d1a 100644 (file)
@@ -149,7 +149,8 @@ struct ports_device {
        spinlock_t ports_lock;
 
        /* To protect the vq operations for the control channel */
-       spinlock_t cvq_lock;
+       spinlock_t c_ivq_lock;
+       spinlock_t c_ovq_lock;
 
        /* The current config space is stored here */
        struct virtio_console_config config;
@@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
        vq = portdev->c_ovq;
 
        sg_init_one(sg, &cpkt, sizeof(cpkt));
+
+       spin_lock(&portdev->c_ovq_lock);
        if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
                virtqueue_kick(vq);
                while (!virtqueue_get_buf(vq, &len))
                        cpu_relax();
        }
+       spin_unlock(&portdev->c_ovq_lock);
        return 0;
 }
 
@@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id)
                 * rproc_serial does not want the console port, only
                 * the generic port implementation.
                 */
-               port->host_connected = port->guest_connected = true;
+               port->host_connected = true;
        else if (!use_multiport(port->portdev)) {
                /*
                 * If we're not using multiport support,
@@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work)
        portdev = container_of(work, struct ports_device, control_work);
        vq = portdev->c_ivq;
 
-       spin_lock(&portdev->cvq_lock);
+       spin_lock(&portdev->c_ivq_lock);
        while ((buf = virtqueue_get_buf(vq, &len))) {
-               spin_unlock(&portdev->cvq_lock);
+               spin_unlock(&portdev->c_ivq_lock);
 
                buf->len = len;
                buf->offset = 0;
 
                handle_control_message(portdev, buf);
 
-               spin_lock(&portdev->cvq_lock);
+               spin_lock(&portdev->c_ivq_lock);
                if (add_inbuf(portdev->c_ivq, buf) < 0) {
                        dev_warn(&portdev->vdev->dev,
                                 "Error adding buffer to queue\n");
                        free_buf(buf, false);
                }
        }
-       spin_unlock(&portdev->cvq_lock);
+       spin_unlock(&portdev->c_ivq_lock);
 }
 
 static void out_intr(struct virtqueue *vq)
@@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq)
        port->inbuf = get_inbuf(port);
 
        /*
-        * Don't queue up data when port is closed.  This condition
+        * Normally the port should not accept data when the port is
+        * closed. For generic serial ports, the host won't (shouldn't)
+        * send data till the guest is connected. But this condition
         * can be reached when a console port is not yet connected (no
-        * tty is spawned) and the host sends out data to console
-        * ports.  For generic serial ports, the host won't
-        * (shouldn't) send data till the guest is connected.
+        * tty is spawned) and the other side sends out data over the
+        * vring, or when a remote device starts sending data before
+        * the ports are opened.
+        *
+        * A generic serial port will discard data if not connected,
+        * while console ports and rproc-serial ports accept data at
+        * any time. rproc-serial is initialized with guest_connected
+        * set to false because port_fops_open expects this. Console
+        * ports are hooked up with an HVC console and are initialized
+        * with guest_connected set to true.
         */
-       if (!port->guest_connected)
+
+       if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
                discard_port_data(port);
 
        spin_unlock_irqrestore(&port->inbuf_lock, flags);
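
Condensing the rewritten comment above into the acceptance rule it describes (should_keep_data is a hypothetical helper; the driver open-codes the same test right below the comment):

/* Hypothetical helper mirroring the rule described in the comment. */
static bool should_keep_data(const struct port *port)
{
	/* Console ports: guest_connected is true from creation.
	 * rproc-serial: guest_connected starts false, data is kept anyway.
	 * Generic serial: keep data only once userspace has opened the port. */
	return port->guest_connected || is_rproc_serial(port->portdev->vdev);
}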
@@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev)
        if (multiport) {
                unsigned int nr_added_bufs;
 
-               spin_lock_init(&portdev->cvq_lock);
+               spin_lock_init(&portdev->c_ivq_lock);
+               spin_lock_init(&portdev->c_ovq_lock);
                INIT_WORK(&portdev->control_work, &control_work_handler);
 
-               nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+               nr_added_bufs = fill_queue(portdev->c_ivq,
+                                          &portdev->c_ivq_lock);
                if (!nr_added_bufs) {
                        dev_err(&vdev->dev,
                                "Error allocating buffers for control queue\n");
@@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev)
                return ret;
 
        if (use_multiport(portdev))
-               fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+               fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
 
        list_for_each_entry(port, &portdev->ports, list) {
                port->in_vq = portdev->in_vqs[port->id];
index 1e2de730536282da499d60a57397f56b7d871c5e..f873dcefe0de63b4271ac82302a24892dbe4803d 100644 (file)
@@ -703,7 +703,7 @@ static void tegra20_pll_init(void)
        clks[pll_a_out0] = clk;
 
        /* PLLE */
-       clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL,
+       clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base,
                             0, 100000000, &pll_e_params,
                             0, pll_e_freq_table, NULL);
        clk_register_clkdev(clk, "pll_e", NULL);
index 937bc286591f9349b5366295fa210412b8ffb756..57a8774f0b4ee907ab67d9c9f8fefd1e3d18fdbd 100644 (file)
@@ -730,7 +730,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
            policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
                cpumask_copy(policy->cpus, perf->shared_cpu_map);
        }
-       cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
        dmi_check_system(sw_any_bug_dmi_table);
@@ -742,7 +741,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
                cpumask_clear(policy->cpus);
                cpumask_set_cpu(cpu, policy->cpus);
-               cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
                policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
                pr_info_once(PFX "overriding BIOS provided _PSD data\n");
        }
index 2fd779eb1ed1f9fcdb7cb70ab54ed319fe1e4160..bfd6273fd873531d864f3b4e104d37018a5d8dda 100644 (file)
@@ -180,15 +180,19 @@ static void cpufreq_stats_free_sysfs(unsigned int cpu)
 {
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 
-       if (!cpufreq_frequency_get_table(cpu))
+       if (!policy)
                return;
 
-       if (policy && !policy_is_shared(policy)) {
+       if (!cpufreq_frequency_get_table(cpu))
+               goto put_ref;
+
+       if (!policy_is_shared(policy)) {
                pr_debug("%s: Free sysfs stat\n", __func__);
                sysfs_remove_group(&policy->kobj, &stats_attr_group);
        }
-       if (policy)
-               cpufreq_cpu_put(policy);
+
+put_ref:
+       cpufreq_cpu_put(policy);
 }
 
 static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
index f6dd1e7611293bf5a66531f2acb285713439430c..ad72922919ed79b181a83467dea455216676d56c 100644 (file)
@@ -358,14 +358,14 @@ static void intel_pstate_sysfs_expose_params(void)
 static int intel_pstate_min_pstate(void)
 {
        u64 value;
-       rdmsrl(0xCE, value);
+       rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 40) & 0xFF;
 }
 
 static int intel_pstate_max_pstate(void)
 {
        u64 value;
-       rdmsrl(0xCE, value);
+       rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 8) & 0xFF;
 }
 
@@ -373,7 +373,7 @@ static int intel_pstate_turbo_pstate(void)
 {
        u64 value;
        int nont, ret;
-       rdmsrl(0x1AD, value);
+       rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
        nont = intel_pstate_max_pstate();
        ret = ((value) & 255);
        if (ret <= nont)
@@ -454,7 +454,7 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        sample->idletime_us * 100,
                                        sample->duration_us);
        core_pct = div64_u64(sample->aperf * 100, sample->mperf);
-       sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000;
+       sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
 
        sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
                                        100);
@@ -752,6 +752,29 @@ static struct cpufreq_driver intel_pstate_driver = {
 
 static int __initdata no_load;
 
+static int intel_pstate_msrs_not_valid(void)
+{
+       /* Check that all the msr's we are using are valid. */
+       u64 aperf, mperf, tmp;
+
+       rdmsrl(MSR_IA32_APERF, aperf);
+       rdmsrl(MSR_IA32_MPERF, mperf);
+
+       if (!intel_pstate_min_pstate() ||
+               !intel_pstate_max_pstate() ||
+               !intel_pstate_turbo_pstate())
+               return -ENODEV;
+
+       rdmsrl(MSR_IA32_APERF, tmp);
+       if (!(tmp - aperf))
+               return -ENODEV;
+
+       rdmsrl(MSR_IA32_MPERF, tmp);
+       if (!(tmp - mperf))
+               return -ENODEV;
+
+       return 0;
+}
 static int __init intel_pstate_init(void)
 {
        int cpu, rc = 0;
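
intel_pstate_msrs_not_valid() above refuses to load the driver unless the platform looks sane: the min, max and turbo P-state reads must be nonzero and APERF/MPERF must actually advance between two reads. A generic sketch of the read-twice liveness check (the helper name is illustrative):

#include <linux/types.h>
#include <asm/msr.h>

/* A counter MSR is considered alive only if it moves between two reads,
 * which is what the APERF/MPERF comparison above relies on. */
static bool msr_counter_alive(u32 msr)
{
	u64 before, after;

	rdmsrl(msr, before);
	rdmsrl(msr, after);
	return after != before;
}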
@@ -764,6 +787,9 @@ static int __init intel_pstate_init(void)
        if (!id)
                return -ENODEV;
 
+       if (intel_pstate_msrs_not_valid())
+               return -ENODEV;
+
        pr_info("Intel P-state driver initializing.\n");
 
        all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
index b2a0a0726a5405caf7c0f7d252745d9554e34643..cf268b14ae9a393890ac37597f29008ee2c93126 100644 (file)
@@ -1650,11 +1650,7 @@ struct caam_alg_template {
 };
 
 static struct caam_alg_template driver_algs[] = {
-       /*
-        * single-pass ipsec_esp descriptor
-        * authencesn(*,*) is also registered, although not present
-        * explicitly here.
-        */
+       /* single-pass ipsec_esp descriptor */
        {
                .name = "authenc(hmac(md5),cbc(aes))",
                .driver_name = "authenc-hmac-md5-cbc-aes-caam",
@@ -2217,9 +2213,7 @@ static int __init caam_algapi_init(void)
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                /* TODO: check if h/w supports alg */
                struct caam_crypto_alg *t_alg;
-               bool done = false;
 
-authencesn:
                t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
@@ -2233,25 +2227,8 @@ authencesn:
                        dev_warn(ctrldev, "%s alg registration failed\n",
                                t_alg->crypto_alg.cra_driver_name);
                        kfree(t_alg);
-               } else {
+               } else
                        list_add_tail(&t_alg->entry, &priv->alg_list);
-                       if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
-                           !memcmp(driver_algs[i].name, "authenc", 7) &&
-                           !done) {
-                               char *name;
-
-                               name = driver_algs[i].name;
-                               memmove(name + 10, name + 7, strlen(name) - 7);
-                               memcpy(name + 7, "esn", 3);
-
-                               name = driver_algs[i].driver_name;
-                               memmove(name + 10, name + 7, strlen(name) - 7);
-                               memcpy(name + 7, "esn", 3);
-
-                               done = true;
-                               goto authencesn;
-                       }
-               }
        }
        if (!list_empty(&priv->alg_list))
                dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
index cf15e7813801a6cb8e1d631663a2fb4421cb5a7a..762aeff626ac6f7a980c8fe3019fcd72252ecd7c 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/types.h>
 #include <linux/debugfs.h>
 #include <linux/circ_buf.h>
-#include <linux/string.h>
 #include <net/xfrm.h>
 
 #include <crypto/algapi.h>
index 09b184adf31b73902a98651843805e0c129c15b9..5b2b5e61e4f9d0516e4b66090ba56ef79e941632 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/spinlock.h>
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
-#include <linux/string.h>
 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
@@ -1974,11 +1973,7 @@ struct talitos_alg_template {
 };
 
 static struct talitos_alg_template driver_algs[] = {
-       /*
-        * AEAD algorithms. These use a single-pass ipsec_esp descriptor.
-        * authencesn(*,*) is also registered, although not present
-        * explicitly here.
-        */
+       /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
        {       .type = CRYPTO_ALG_TYPE_AEAD,
                .alg.crypto = {
                        .cra_name = "authenc(hmac(sha1),cbc(aes))",
@@ -2820,9 +2815,7 @@ static int talitos_probe(struct platform_device *ofdev)
                if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
                        struct talitos_crypto_alg *t_alg;
                        char *name = NULL;
-                       bool authenc = false;
 
-authencesn:
                        t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
                        if (IS_ERR(t_alg)) {
                                err = PTR_ERR(t_alg);
@@ -2837,8 +2830,6 @@ authencesn:
                                err = crypto_register_alg(
                                                &t_alg->algt.alg.crypto);
                                name = t_alg->algt.alg.crypto.cra_driver_name;
-                               authenc = authenc ? !authenc :
-                                         !(bool)memcmp(name, "authenc", 7);
                                break;
                        case CRYPTO_ALG_TYPE_AHASH:
                                err = crypto_register_ahash(
@@ -2851,25 +2842,8 @@ authencesn:
                                dev_err(dev, "%s alg registration failed\n",
                                        name);
                                kfree(t_alg);
-                       } else {
+                       } else
                                list_add_tail(&t_alg->entry, &priv->alg_list);
-                               if (authenc) {
-                                       struct crypto_alg *alg =
-                                               &driver_algs[i].alg.crypto;
-
-                                       name = alg->cra_name;
-                                       memmove(name + 10, name + 7,
-                                               strlen(name) - 7);
-                                       memcpy(name + 7, "esn", 3);
-
-                                       name = alg->cra_driver_name;
-                                       memmove(name + 10, name + 7,
-                                               strlen(name) - 7);
-                                       memcpy(name + 7, "esn", 3);
-
-                                       goto authencesn;
-                               }
-                       }
                }
        }
        if (!list_empty(&priv->alg_list))
index 80b69971cf28d01752abd3654a8cc296f25a8380..aeaea32bcfdacd5fed2eb7e53536169348791cc8 100644 (file)
@@ -83,6 +83,7 @@ config INTEL_IOP_ADMA
 
 config DW_DMAC
        tristate "Synopsys DesignWare AHB DMA support"
+       depends on GENERIC_HARDIRQS
        select DMA_ENGINE
        default y if CPU_AT32AP7000
        help
index c599558faedaf29128a5a10bb15eaed7dbc5d41c..43a5329d44837c4042687d6f93436b3caf0627c1 100644 (file)
@@ -1001,6 +1001,13 @@ static inline void convert_burst(u32 *maxburst)
                *maxburst = 0;
 }
 
+static inline void convert_slave_id(struct dw_dma_chan *dwc)
+{
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+       dwc->dma_sconfig.slave_id -= dw->request_line_base;
+}
+
 static int
 set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
@@ -1015,6 +1022,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 
        convert_burst(&dwc->dma_sconfig.src_maxburst);
        convert_burst(&dwc->dma_sconfig.dst_maxburst);
+       convert_slave_id(dwc);
 
        return 0;
 }
@@ -1276,9 +1284,9 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
        if (dma_spec->args_count != 3)
                return NULL;
 
-       fargs.req = be32_to_cpup(dma_spec->args+0);
-       fargs.src = be32_to_cpup(dma_spec->args+1);
-       fargs.dst = be32_to_cpup(dma_spec->args+2);
+       fargs.req = dma_spec->args[0];
+       fargs.src = dma_spec->args[1];
+       fargs.dst = dma_spec->args[2];
 
        if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
                    fargs.src >= dw->nr_masters ||
@@ -1628,6 +1636,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
 
 static int dw_probe(struct platform_device *pdev)
 {
+       const struct platform_device_id *match;
        struct dw_dma_platform_data *pdata;
        struct resource         *io;
        struct dw_dma           *dw;
@@ -1711,6 +1720,11 @@ static int dw_probe(struct platform_device *pdev)
                memcpy(dw->data_width, pdata->data_width, 4);
        }
 
+       /* Get the base request line if set */
+       match = platform_get_device_id(pdev);
+       if (match)
+               dw->request_line_base = (unsigned int)match->driver_data;
+
        /* Calculate all channel mask before DMA setup */
        dw->all_chan_mask = (1 << nr_channels) - 1;
 
@@ -1906,7 +1920,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
 #endif
 
 static const struct platform_device_id dw_dma_ids[] = {
-       { "INTL9C60", 0 },
+       /* Name,        Request Line Base */
+       { "INTL9C60",   (kernel_ulong_t)16 },
        { }
 };
 
index cf0ce5c77d609d52e024f3426ab521bdb86e7545..4d02c3669b75bcf023a7f03916d02d3749ec2e1e 100644 (file)
@@ -247,6 +247,7 @@ struct dw_dma {
        /* hardware configuration */
        unsigned char           nr_masters;
        unsigned char           data_width[4];
+       unsigned int            request_line_base;
 
        struct dw_dma_chan      chan[0];
 };
index b70e3815c45932a3b76f7f30fff98d69907a8483..8f3c947b0029ab4a864d25dcd4a5ccaf4dfce90e 100644 (file)
 #define        DEV_NAME                        "max77693-muic"
 #define        DELAY_MS_DEFAULT                20000           /* unit: millisecond */
 
+/*
+ * Default values of MAX77693 registers to bring up the MUIC device.
+ * If the user doesn't provide initial values for the MUIC device through
+ * platform data, the extcon-max77693 driver uses 'default_init_data' to
+ * bring up basic operation of the MAX77693 MUIC device.
+ */
+struct max77693_reg_data default_init_data[] = {
+       {
+               /* STATUS2 - [3]ChgDetRun */
+               .addr = MAX77693_MUIC_REG_STATUS2,
+               .data = STATUS2_CHGDETRUN_MASK,
+       }, {
+               /* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */
+               .addr = MAX77693_MUIC_REG_INTMASK1,
+               .data = INTMASK1_ADC1K_MASK
+                       | INTMASK1_ADC_MASK,
+       }, {
+               /* INTMASK2 - Unmask [0]ChgTypM */
+               .addr = MAX77693_MUIC_REG_INTMASK2,
+               .data = INTMASK2_CHGTYP_MASK,
+       }, {
+               /* INTMASK3 - Mask all interrupts */
+               .addr = MAX77693_MUIC_REG_INTMASK3,
+               .data = 0x0,
+       }, {
+               /* CDETCTRL2 */
+               .addr = MAX77693_MUIC_REG_CDETCTRL2,
+               .data = CDETCTRL2_VIDRMEN_MASK
+                       | CDETCTRL2_DXOVPEN_MASK,
+       },
+};
+
 enum max77693_muic_adc_debounce_time {
        ADC_DEBOUNCE_TIME_5MS = 0,
        ADC_DEBOUNCE_TIME_10MS,
@@ -1045,8 +1077,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
 {
        struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
        struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
-       struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
        struct max77693_muic_info *info;
+       struct max77693_reg_data *init_data;
+       int num_init_data;
        int delay_jiffies;
        int ret;
        int i;
@@ -1145,15 +1178,25 @@ static int max77693_muic_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
-       /* Initialize MUIC register by using platform data */
-       for (i = 0 ; i < muic_pdata->num_init_data ; i++) {
-               enum max77693_irq_source irq_src = MAX77693_IRQ_GROUP_NR;
+
+       /* Initialize MUIC registers using platform data or the default data */
+       if (pdata->muic_data) {
+               init_data = pdata->muic_data->init_data;
+               num_init_data = pdata->muic_data->num_init_data;
+       } else {
+               init_data = default_init_data;
+               num_init_data = ARRAY_SIZE(default_init_data);
+       }
+
+       for (i = 0 ; i < num_init_data ; i++) {
+               enum max77693_irq_source irq_src
+                               = MAX77693_IRQ_GROUP_NR;
 
                max77693_write_reg(info->max77693->regmap_muic,
-                               muic_pdata->init_data[i].addr,
-                               muic_pdata->init_data[i].data);
+                               init_data[i].addr,
+                               init_data[i].data);
 
-               switch (muic_pdata->init_data[i].addr) {
+               switch (init_data[i].addr) {
                case MAX77693_MUIC_REG_INTMASK1:
                        irq_src = MUIC_INT1;
                        break;
@@ -1167,22 +1210,40 @@ static int max77693_muic_probe(struct platform_device *pdev)
 
                if (irq_src < MAX77693_IRQ_GROUP_NR)
                        info->max77693->irq_masks_cur[irq_src]
-                               = muic_pdata->init_data[i].data;
+                               = init_data[i].data;
        }
 
-       /*
-        * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
-        * h/w path of COMP2/COMN1 on CONTROL1 register.
-        */
-       if (muic_pdata->path_uart)
-               info->path_uart = muic_pdata->path_uart;
-       else
-               info->path_uart = CONTROL1_SW_UART;
+       if (pdata->muic_data) {
+               struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
 
-       if (muic_pdata->path_usb)
-               info->path_usb = muic_pdata->path_usb;
-       else
+               /*
+                * Default USB/UART path, either UART/USB or AUX_UART/AUX_USB,
+                * selected via the COMP2/COMN1 h/w path on the CONTROL1 register.
+                */
+               if (muic_pdata->path_uart)
+                       info->path_uart = muic_pdata->path_uart;
+               else
+                       info->path_uart = CONTROL1_SW_UART;
+
+               if (muic_pdata->path_usb)
+                       info->path_usb = muic_pdata->path_usb;
+               else
+                       info->path_usb = CONTROL1_SW_USB;
+
+               /*
+                * Delay before the initial cable state detection,
+                * from platform data or the default value.
+                */
+               if (muic_pdata->detcable_delay_ms)
+                       delay_jiffies =
+                               msecs_to_jiffies(muic_pdata->detcable_delay_ms);
+               else
+                       delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+       } else {
                info->path_usb = CONTROL1_SW_USB;
+               info->path_uart = CONTROL1_SW_UART;
+               delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+       }
 
        /* Set initial path for UART */
         max77693_muic_set_path(info, info->path_uart, true);
@@ -1208,10 +1269,6 @@ static int max77693_muic_probe(struct platform_device *pdev)
         * driver should notify cable state to upper layer.
         */
        INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
-       if (muic_pdata->detcable_delay_ms)
-               delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms);
-       else
-               delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
        schedule_delayed_work(&info->wq_detcable, delay_jiffies);
 
        return ret;
index e636d950ad6c15d11d279474d3b55d89b572e257..69641bcae325a8f73e226ee66d9f418730c2ddbd 100644 (file)
@@ -712,29 +712,45 @@ static int max8997_muic_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
-       /* Initialize registers according to platform data */
        if (pdata->muic_pdata) {
-               struct max8997_muic_platform_data *mdata = info->muic_pdata;
-
-               for (i = 0; i < mdata->num_init_data; i++) {
-                       max8997_write_reg(info->muic, mdata->init_data[i].addr,
-                                       mdata->init_data[i].data);
+               struct max8997_muic_platform_data *muic_pdata
+                       = pdata->muic_pdata;
+
+               /* Initialize registers according to platform data */
+               for (i = 0; i < muic_pdata->num_init_data; i++) {
+                       max8997_write_reg(info->muic,
+                                       muic_pdata->init_data[i].addr,
+                                       muic_pdata->init_data[i].data);
                }
-       }
 
-       /*
-        * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
-        * h/w path of COMP2/COMN1 on CONTROL1 register.
-        */
-       if (pdata->muic_pdata->path_uart)
-               info->path_uart = pdata->muic_pdata->path_uart;
-       else
-               info->path_uart = CONTROL1_SW_UART;
+               /*
+                * Default USB/UART path, either UART/USB or AUX_UART/AUX_USB,
+                * selected via the COMP2/COMN1 h/w path on the CONTROL1 register.
+                */
+               if (muic_pdata->path_uart)
+                       info->path_uart = muic_pdata->path_uart;
+               else
+                       info->path_uart = CONTROL1_SW_UART;
 
-       if (pdata->muic_pdata->path_usb)
-               info->path_usb = pdata->muic_pdata->path_usb;
-       else
+               if (muic_pdata->path_usb)
+                       info->path_usb = muic_pdata->path_usb;
+               else
+                       info->path_usb = CONTROL1_SW_USB;
+
+               /*
+                * Delay before the initial cable state detection,
+                * from platform data or the default value.
+                */
+               if (muic_pdata->detcable_delay_ms)
+                       delay_jiffies =
+                               msecs_to_jiffies(muic_pdata->detcable_delay_ms);
+               else
+                       delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+       } else {
+               info->path_uart = CONTROL1_SW_UART;
                info->path_usb = CONTROL1_SW_USB;
+               delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+       }
 
        /* Set initial path for UART */
         max8997_muic_set_path(info, info->path_uart, true);
@@ -751,10 +767,6 @@ static int max8997_muic_probe(struct platform_device *pdev)
         * driver should notify cable state to upper layer.
         */
        INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
-       if (pdata->muic_pdata->detcable_delay_ms)
-               delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
-       else
-               delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
        schedule_delayed_work(&info->wq_detcable, delay_jiffies);
 
        return 0;
index 9b00072a020fb5141060c5ee89cfe31edbd651be..42c759a4d047f6754ddf8676a390d8d0dcdffafb 100644 (file)
@@ -53,6 +53,24 @@ config EFI_VARS
          Subsequent efibootmgr releases may be found at:
          <http://linux.dell.com/efibootmgr>
 
+config EFI_VARS_PSTORE
+       bool "Register efivars backend for pstore"
+       depends on EFI_VARS && PSTORE
+       default y
+       help
+         Say Y here to enable the use of efivars as a backend for pstore. This
+         will allow writing console messages, crash dumps, or anything
+         else supported by pstore to EFI variables.
+
+config EFI_VARS_PSTORE_DEFAULT_DISABLE
+       bool "Disable using efivars as a pstore backend by default"
+       depends on EFI_VARS_PSTORE
+       default n
+       help
+         Saying Y here will disable the use of efivars as a storage
+         backend for pstore by default. This setting can be overridden
+         using the efivars module's pstore_disable parameter.
+
 config EFI_PCDP
        bool "Console device selection via EFI PCDP or HCDP table"
        depends on ACPI && EFI && IA64
index fe62aa3922398ebc52e7f61b586a71814a4b9dfe..7acafb80fd4c79b19b1c53551bb5e7e8612af707 100644 (file)
@@ -103,6 +103,11 @@ MODULE_VERSION(EFIVARS_VERSION);
  */
 #define GUID_LEN 36
 
+static bool efivars_pstore_disable =
+       IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
+
 /*
  * The maximum size of VariableName + Data = 1024
  * Therefore, it's reasonable to save that much
@@ -165,6 +170,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
 
 static void efivar_update_sysfs_entries(struct work_struct *);
 static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
+static bool efivar_wq_enabled = true;
 
 /* Return the number of unicode characters in data */
 static unsigned long
@@ -1309,9 +1315,7 @@ static const struct inode_operations efivarfs_dir_inode_operations = {
        .create = efivarfs_create,
 };
 
-static struct pstore_info efi_pstore_info;
-
-#ifdef CONFIG_PSTORE
+#ifdef CONFIG_EFI_VARS_PSTORE
 
 static int efi_pstore_open(struct pstore_info *psi)
 {
@@ -1441,7 +1445,7 @@ static int efi_pstore_write(enum pstore_type_id type,
 
        spin_unlock_irqrestore(&efivars->lock, flags);
 
-       if (reason == KMSG_DUMP_OOPS)
+       if (reason == KMSG_DUMP_OOPS && efivar_wq_enabled)
                schedule_work(&efivar_work);
 
        *id = part;
@@ -1514,38 +1518,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
 
        return 0;
 }
-#else
-static int efi_pstore_open(struct pstore_info *psi)
-{
-       return 0;
-}
-
-static int efi_pstore_close(struct pstore_info *psi)
-{
-       return 0;
-}
-
-static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count,
-                              struct timespec *timespec,
-                              char **buf, struct pstore_info *psi)
-{
-       return -1;
-}
-
-static int efi_pstore_write(enum pstore_type_id type,
-               enum kmsg_dump_reason reason, u64 *id,
-               unsigned int part, int count, size_t size,
-               struct pstore_info *psi)
-{
-       return 0;
-}
-
-static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
-                           struct timespec time, struct pstore_info *psi)
-{
-       return 0;
-}
-#endif
 
 static struct pstore_info efi_pstore_info = {
        .owner          = THIS_MODULE,
@@ -1557,6 +1529,24 @@ static struct pstore_info efi_pstore_info = {
        .erase          = efi_pstore_erase,
 };
 
+static void efivar_pstore_register(struct efivars *efivars)
+{
+       efivars->efi_pstore_info = efi_pstore_info;
+       efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+       if (efivars->efi_pstore_info.buf) {
+               efivars->efi_pstore_info.bufsize = 1024;
+               efivars->efi_pstore_info.data = efivars;
+               spin_lock_init(&efivars->efi_pstore_info.buf_lock);
+               pstore_register(&efivars->efi_pstore_info);
+       }
+}
+#else
+static void efivar_pstore_register(struct efivars *efivars)
+{
+       return;
+}
+#endif
+
 static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
                             struct bin_attribute *bin_attr,
                             char *buf, loff_t pos, size_t count)
@@ -1716,6 +1706,31 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
        return found;
 }
 
+/*
+ * Returns the size of variable_name, in bytes, including the
+ * terminating NULL character, or variable_name_size if no NULL
+ * character is found among the first variable_name_size bytes.
+ */
+static unsigned long var_name_strnsize(efi_char16_t *variable_name,
+                                      unsigned long variable_name_size)
+{
+       unsigned long len;
+       efi_char16_t c;
+
+       /*
+        * The variable name is, by definition, a NULL-terminated
+        * string, so make absolutely sure that variable_name_size is
+        * the value we expect it to be. If not, return the real size.
+        */
+       for (len = 2; len <= variable_name_size; len += sizeof(c)) {
+               c = variable_name[(len / sizeof(c)) - 1];
+               if (!c)
+                       break;
+       }
+
+       return min(len, variable_name_size);
+}
+
 static void efivar_update_sysfs_entries(struct work_struct *work)
 {
        struct efivars *efivars = &__efivars;
@@ -1756,10 +1771,13 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
                if (!found) {
                        kfree(variable_name);
                        break;
-               } else
+               } else {
+                       variable_name_size = var_name_strnsize(variable_name,
+                                                              variable_name_size);
                        efivar_create_sysfs_entry(efivars,
                                                  variable_name_size,
                                                  variable_name, &vendor);
+               }
        }
 }
 
@@ -1958,6 +1976,35 @@ void unregister_efivars(struct efivars *efivars)
 }
 EXPORT_SYMBOL_GPL(unregister_efivars);
 
+/*
+ * Print a warning when duplicate EFI variables are encountered and
+ * disable the sysfs workqueue since the firmware is buggy.
+ */
+static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
+                            unsigned long len16)
+{
+       size_t i, len8 = len16 / sizeof(efi_char16_t);
+       char *s8;
+
+       /*
+        * Disable the workqueue since the algorithm it uses for
+        * detecting new variables won't work with this buggy
+        * implementation of GetNextVariableName().
+        */
+       efivar_wq_enabled = false;
+
+       s8 = kzalloc(len8, GFP_KERNEL);
+       if (!s8)
+               return;
+
+       for (i = 0; i < len8; i++)
+               s8[i] = s16[i];
+
+       printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
+              s8, vendor_guid);
+       kfree(s8);
+}
+
 int register_efivars(struct efivars *efivars,
                     const struct efivar_operations *ops,
                     struct kobject *parent_kobj)
@@ -2006,6 +2053,24 @@ int register_efivars(struct efivars *efivars,
                                                &vendor_guid);
                switch (status) {
                case EFI_SUCCESS:
+                       variable_name_size = var_name_strnsize(variable_name,
+                                                              variable_name_size);
+
+                       /*
+                        * Some firmware implementations return the
+                        * same variable name on multiple calls to
+                        * get_next_variable(). Terminate the loop
+                        * immediately as there is no guarantee that
+                        * we'll ever see a different variable name,
+                        * and may end up looping here forever.
+                        */
+                       if (variable_is_present(variable_name, &vendor_guid)) {
+                               dup_variable_bug(variable_name, &vendor_guid,
+                                                variable_name_size);
+                               status = EFI_NOT_FOUND;
+                               break;
+                       }
+
                        efivar_create_sysfs_entry(efivars,
                                                  variable_name_size,
                                                  variable_name,
@@ -2025,15 +2090,8 @@ int register_efivars(struct efivars *efivars,
        if (error)
                unregister_efivars(efivars);
 
-       efivars->efi_pstore_info = efi_pstore_info;
-
-       efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
-       if (efivars->efi_pstore_info.buf) {
-               efivars->efi_pstore_info.bufsize = 1024;
-               efivars->efi_pstore_info.data = efivars;
-               spin_lock_init(&efivars->efi_pstore_info.buf_lock);
-               pstore_register(&efivars->efi_pstore_info);
-       }
+       if (!efivars_pstore_disable)
+               efivar_pstore_register(efivars);
 
        register_filesystem(&efivarfs_type);
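
The size-clamping logic added in var_name_strnsize() above can be exercised on its own; here is a userspace re-expression of it for illustration (the efi_char16 typedef and the sample "BootOrder" name are assumptions for the sketch, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t efi_char16;        /* stand-in for the kernel's efi_char16_t */

    /* Size of a UTF-16 variable name in bytes, including the terminating NUL,
     * clamped to the size reported by (possibly buggy) firmware. */
    static unsigned long name_strnsize(const efi_char16 *name, unsigned long size)
    {
            unsigned long len;

            for (len = sizeof(*name); len <= size; len += sizeof(*name)) {
                    if (!name[(len / sizeof(*name)) - 1])
                            break;
            }
            return len < size ? len : size;
    }

    int main(void)
    {
            const efi_char16 boot_order[] = {
                    'B', 'o', 'o', 't', 'O', 'r', 'd', 'e', 'r', 0
            };

            /* Firmware over-reports 1024 bytes; the real size is 20 bytes. */
            printf("%lu\n", name_strnsize(boot_order, 1024));
            return 0;
    }
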
 
index f9dbd503fc40fb0f1bccd0fa8f0995816a06b538..de3c317bd3e2d46f788c09d4af95a68dbbc97c69 100644 (file)
@@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
         * If it can't be trusted, assume that the pin can be used as a GPIO.
         */
        if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
-               return 1;
+               return 0;
 
        return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
 }
index 770476a9da87fbc4ca3a0a64008768602db8f14d..3ce5bc38ac3180b015548271eeeb0b6a3c6c5543 100644 (file)
@@ -307,11 +307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = {
        .xlate = irq_domain_xlate_twocell,
 };
 
-static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio)
+static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio,
+               struct device_node *np)
 {
-       int base = stmpe_gpio->irq_base;
+       int base = 0;
 
-       stmpe_gpio->domain = irq_domain_add_simple(NULL,
+       if (!np)
+               base = stmpe_gpio->irq_base;
+
+       stmpe_gpio->domain = irq_domain_add_simple(np,
                                stmpe_gpio->chip.ngpio, base,
                                &stmpe_gpio_irq_simple_ops, stmpe_gpio);
        if (!stmpe_gpio->domain) {
@@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
        stmpe_gpio->chip = template_chip;
        stmpe_gpio->chip.ngpio = stmpe->num_gpios;
        stmpe_gpio->chip.dev = &pdev->dev;
+#ifdef CONFIG_OF
+       stmpe_gpio->chip.of_node = np;
+#endif
        stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1;
 
        if (pdata)
@@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
                goto out_free;
 
        if (irq >= 0) {
-               ret = stmpe_gpio_irq_init(stmpe_gpio);
+               ret = stmpe_gpio_irq_init(stmpe_gpio, np);
                if (ret)
                        goto out_disable;
 
index a71a54a3e3f783a4151ed5919c01290ade8ffd5d..5150df6cba0815623c9fc7c86e96ff2ad099f0ed 100644 (file)
@@ -193,7 +193,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
        if (!np)
                return;
 
-       do {
+       for (;; index++) {
                ret = of_parse_phandle_with_args(np, "gpio-ranges",
                                "#gpio-range-cells", index, &pinspec);
                if (ret)
@@ -222,8 +222,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
 
                if (ret)
                        break;
-
-       } while (index++);
+       }
 }
 
 #else
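
To see why the loop shape changed here: if index starts at zero earlier in of_gpiochip_add_pin_range() (not visible in this hunk), the old "do { ... } while (index++);" form tests 0 on its first evaluation and therefore never makes a second pass, so only the first gpio-ranges entry would be parsed. A standalone sketch of the difference:

    #include <stdio.h>

    int main(void)
    {
            int index = 0;
            int passes = 0;

            do {
                    passes++;
                    if (passes >= 5)        /* stand-in for "no more ranges" */
                            break;
            } while (index++);              /* 0 is false: ends after one pass */

            printf("do/while passes: %d\n", passes);        /* prints 1 */

            for (index = 0, passes = 0; ; index++) {
                    passes++;
                    if (passes >= 5)        /* keeps going until we break */
                            break;
            }

            printf("for (;;) passes: %d\n", passes);        /* prints 5 */
            return 0;
    }
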
index 1e82882da9de6e97a1520ab180f5dff8fe103eb3..19b8e0d5d91088231f14bf59439e9fb64414cc94 100644 (file)
@@ -220,3 +220,5 @@ source "drivers/gpu/drm/tegra/Kconfig"
 source "drivers/gpu/drm/omapdrm/Kconfig"
 
 source "drivers/gpu/drm/tilcdc/Kconfig"
+
+source "drivers/gpu/drm/qxl/Kconfig"
index 0d59b24f8d23a38e113af74d2ac44afe80393386..6a4211521011002105ffa62b7eec5f7b45458eb2 100644 (file)
@@ -52,4 +52,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_OMAP) += omapdrm/
 obj-$(CONFIG_DRM_TILCDC)       += tilcdc/
+obj-$(CONFIG_DRM_QXL) += qxl/
 obj-y                  += i2c/
index a575cb2e6bdbe0b40320b1b836fb0780d7186aef..bb8f58012189af7651c20c2d9b1da1ed390ebcfd 100644 (file)
@@ -105,12 +105,11 @@ drm_clflush_sg(struct sg_table *st)
 {
 #if defined(CONFIG_X86)
        if (cpu_has_clflush) {
-               struct scatterlist *sg;
-               int i;
+               struct sg_page_iter sg_iter;
 
                mb();
-               for_each_sg(st->sgl, sg, st->nents, i)
-                       drm_clflush_page(sg_page(sg));
+               for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+                       drm_clflush_page(sg_page_iter_page(&sg_iter));
                mb();
 
                return;
index 792c3e3795cae819caedaf12121052b47ad34a89..957fb70e8d0e41b16d7d078fbed5b9cff57363e6 100644 (file)
@@ -412,7 +412,7 @@ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
        mutex_lock(&dev->mode_config.fb_lock);
        fb = __drm_framebuffer_lookup(dev, id);
        if (fb)
-               kref_get(&fb->refcount);
+               drm_framebuffer_reference(fb);
        mutex_unlock(&dev->mode_config.fb_lock);
 
        return fb;
@@ -1120,44 +1120,6 @@ int drm_mode_create_dirty_info_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
 
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- *
- * Since this initializes the modeset locks, no locking is possible. Which is no
- * problem, since this should happen single threaded at init time. It is the
- * driver's problem to ensure this guarantee.
- *
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
-       mutex_init(&dev->mode_config.mutex);
-       mutex_init(&dev->mode_config.idr_mutex);
-       mutex_init(&dev->mode_config.fb_lock);
-       INIT_LIST_HEAD(&dev->mode_config.fb_list);
-       INIT_LIST_HEAD(&dev->mode_config.crtc_list);
-       INIT_LIST_HEAD(&dev->mode_config.connector_list);
-       INIT_LIST_HEAD(&dev->mode_config.encoder_list);
-       INIT_LIST_HEAD(&dev->mode_config.property_list);
-       INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
-       INIT_LIST_HEAD(&dev->mode_config.plane_list);
-       idr_init(&dev->mode_config.crtc_idr);
-
-       drm_modeset_lock_all(dev);
-       drm_mode_create_standard_connector_properties(dev);
-       drm_modeset_unlock_all(dev);
-
-       /* Just to be sure */
-       dev->mode_config.num_fb = 0;
-       dev->mode_config.num_connector = 0;
-       dev->mode_config.num_crtc = 0;
-       dev->mode_config.num_encoder = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
 int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
 {
        uint32_t total_objects = 0;
@@ -1202,69 +1164,6 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
 
-/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * Note that since this /should/ happen single-threaded at driver/device
- * teardown time, no locking is required. It's the driver's job to ensure that
- * this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
-       struct drm_connector *connector, *ot;
-       struct drm_crtc *crtc, *ct;
-       struct drm_encoder *encoder, *enct;
-       struct drm_framebuffer *fb, *fbt;
-       struct drm_property *property, *pt;
-       struct drm_plane *plane, *plt;
-
-       list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
-                                head) {
-               encoder->funcs->destroy(encoder);
-       }
-
-       list_for_each_entry_safe(connector, ot,
-                                &dev->mode_config.connector_list, head) {
-               connector->funcs->destroy(connector);
-       }
-
-       list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
-                                head) {
-               drm_property_destroy(dev, property);
-       }
-
-       /*
-        * Single-threaded teardown context, so it's not required to grab the
-        * fb_lock to protect against concurrent fb_list access. Contrary, it
-        * would actually deadlock with the drm_framebuffer_cleanup function.
-        *
-        * Also, if there are any framebuffers left, that's a driver leak now,
-        * so politely WARN about this.
-        */
-       WARN_ON(!list_empty(&dev->mode_config.fb_list));
-       list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-               drm_framebuffer_remove(fb);
-       }
-
-       list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
-                                head) {
-               plane->funcs->destroy(plane);
-       }
-
-       list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
-               crtc->funcs->destroy(crtc);
-       }
-
-       idr_destroy(&dev->mode_config.crtc_idr);
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
 /**
  * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
  * @out: drm_mode_modeinfo struct to return to the user
@@ -2326,7 +2225,6 @@ int drm_mode_addfb(struct drm_device *dev,
        fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("could not create framebuffer\n");
-               drm_modeset_unlock_all(dev);
                return PTR_ERR(fb);
        }
 
@@ -2506,7 +2404,6 @@ int drm_mode_addfb2(struct drm_device *dev,
        fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("could not create framebuffer\n");
-               drm_modeset_unlock_all(dev);
                return PTR_ERR(fb);
        }
 
@@ -4066,3 +3963,110 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
        }
 }
 EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible. That is
+ * no problem, since this should happen single-threaded at init time. It is
+ * the driver's responsibility to ensure this guarantee.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+       mutex_init(&dev->mode_config.mutex);
+       mutex_init(&dev->mode_config.idr_mutex);
+       mutex_init(&dev->mode_config.fb_lock);
+       INIT_LIST_HEAD(&dev->mode_config.fb_list);
+       INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+       INIT_LIST_HEAD(&dev->mode_config.connector_list);
+       INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+       INIT_LIST_HEAD(&dev->mode_config.plane_list);
+       idr_init(&dev->mode_config.crtc_idr);
+
+       drm_modeset_lock_all(dev);
+       drm_mode_create_standard_connector_properties(dev);
+       drm_modeset_unlock_all(dev);
+
+       /* Just to be sure */
+       dev->mode_config.num_fb = 0;
+       dev->mode_config.num_connector = 0;
+       dev->mode_config.num_crtc = 0;
+       dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+       struct drm_connector *connector, *ot;
+       struct drm_crtc *crtc, *ct;
+       struct drm_encoder *encoder, *enct;
+       struct drm_framebuffer *fb, *fbt;
+       struct drm_property *property, *pt;
+       struct drm_property_blob *blob, *bt;
+       struct drm_plane *plane, *plt;
+
+       list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+                                head) {
+               encoder->funcs->destroy(encoder);
+       }
+
+       list_for_each_entry_safe(connector, ot,
+                                &dev->mode_config.connector_list, head) {
+               connector->funcs->destroy(connector);
+       }
+
+       list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+                                head) {
+               drm_property_destroy(dev, property);
+       }
+
+       list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+                                head) {
+               drm_property_destroy_blob(dev, blob);
+       }
+
+       /*
+        * Single-threaded teardown context, so it's not required to grab the
+        * fb_lock to protect against concurrent fb_list access. On the contrary,
+        * it would actually deadlock with the drm_framebuffer_cleanup function.
+        *
+        * Also, if there are any framebuffers left, that's a driver leak now,
+        * so politely WARN about this.
+        */
+       WARN_ON(!list_empty(&dev->mode_config.fb_list));
+       list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+               drm_framebuffer_remove(fb);
+       }
+
+       list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+                                head) {
+               plane->funcs->destroy(plane);
+       }
+
+       list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+               crtc->funcs->destroy(crtc);
+       }
+
+       idr_destroy(&dev->mode_config.crtc_idr);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
index 25f91cd23e60932ec9246840de26f650c49047f1..0ac1991a470a9892a0a741657182173115b906cf 100644 (file)
@@ -408,6 +408,7 @@ long drm_ioctl(struct file *filp,
                usize = asize = _IOC_SIZE(cmd);
                if (drv_size > asize)
                        asize = drv_size;
+               cmd = ioctl->cmd_drv;
        }
        else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
                ioctl = &drm_ioctls[nr];
index 38d3943f72defa65c878bf198fe2ac5742de5ad8..fa445dd4dc006e5c1ec2f079b206cdd72cecf7e6 100644 (file)
@@ -31,10 +31,11 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
 MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
        "from built-in data or /lib/firmware instead. ");
 
-#define GENERIC_EDIDS 4
+#define GENERIC_EDIDS 5
 static char *generic_edid_name[GENERIC_EDIDS] = {
        "edid/1024x768.bin",
        "edid/1280x1024.bin",
+       "edid/1600x1200.bin",
        "edid/1680x1050.bin",
        "edid/1920x1080.bin",
 };
@@ -79,6 +80,24 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
        {
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
        0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78,
+       0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+       0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40,
+       0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+       0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f,
+       0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0,
+       0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e,
+       0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+       0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+       0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+       0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20,
+       0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+       0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55,
+       0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d,
+       },
+       {
+       0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+       0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
        0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
        0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
index 59d6b9bf204bbd2d76b8440b3f7d435def394fe7..6764dce44e84e9a48e984a929c5cfe39f25cea42 100644 (file)
@@ -1398,7 +1398,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
        struct drm_mode_set *modeset;
        bool *enabled;
        int width, height;
-       int i, ret;
+       int i;
 
        DRM_DEBUG_KMS("\n");
 
@@ -1419,16 +1419,23 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 
        drm_enable_connectors(fb_helper, enabled);
 
-       ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
-       if (!ret) {
-               ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
-               if (!ret)
+       if (!(fb_helper->funcs->initial_config &&
+             fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
+                                              enabled, width, height))) {
+               memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
+               memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
+
+               if (!drm_target_cloned(fb_helper,
+                                      modes, enabled, width, height) &&
+                   !drm_target_preferred(fb_helper,
+                                         modes, enabled, width, height))
                        DRM_ERROR("Unable to find initial modes\n");
-       }
 
-       DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+               DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
+                             width, height);
 
-       drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+               drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+       }
 
        /* need to set the modesets up here for use later */
        /* fill out the connector<->crtc mappings into the modesets */
index 13fdcd10a6051ddd9359af281390748528a6885e..429e07d0b0f147f2c9f672d65a18849f0c57f0f4 100644 (file)
@@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp)
        int retcode = 0;
        int need_setup = 0;
        struct address_space *old_mapping;
+       struct address_space *old_imapping;
 
        minor = idr_find(&drm_minors_idr, minor_id);
        if (!minor)
@@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp)
        if (!dev->open_count++)
                need_setup = 1;
        mutex_lock(&dev->struct_mutex);
+       old_imapping = inode->i_mapping;
        old_mapping = dev->dev_mapping;
        if (old_mapping == NULL)
                dev->dev_mapping = &inode->i_data;
@@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp)
 
 err_undo:
        mutex_lock(&dev->struct_mutex);
-       filp->f_mapping = old_mapping;
-       inode->i_mapping = old_mapping;
+       filp->f_mapping = old_imapping;
+       inode->i_mapping = old_imapping;
        iput(container_of(dev->dev_mapping, struct inode, i_data));
        dev->dev_mapping = old_mapping;
        mutex_unlock(&dev->struct_mutex);
index 366910ddcfcb3cd96b33c2dcd9c1d122b01990b5..25d02187067e79e5f9b8df531a5e9328ec6e6fa0 100644 (file)
@@ -401,21 +401,17 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
 {
        struct sg_table *sg = NULL;
-       struct scatterlist *iter;
-       int i;
        int ret;
 
        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg)
                goto out;
 
-       ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
+       ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+                               nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto out;
 
-       for_each_sg(sg->sgl, iter, nr_pages, i)
-               sg_set_page(iter, pages[i], PAGE_SIZE, 0);
-
        return sg;
 out:
        kfree(sg);
index 7299ea45dd03dec217944f31422f5f1a62bcc6eb..be88532b35cfd0a7776ffe48cfbf8203f9110da0 100644 (file)
@@ -772,6 +772,23 @@ static int i915_error_state(struct seq_file *m, void *unused)
                                }
                        }
                }
+
+               obj = error->ring[i].ctx;
+               if (obj) {
+                       seq_printf(m, "%s --- HW Context = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  obj->gtt_offset);
+                       offset = 0;
+                       for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+                               seq_printf(m, "[%04x] %08x %08x %08x %08x\n",
+                                          offset,
+                                          obj->pages[0][elt],
+                                          obj->pages[0][elt+1],
+                                          obj->pages[0][elt+2],
+                                          obj->pages[0][elt+3]);
+                                       offset += 16;
+                       }
+               }
        }
 
        if (error->overlay)
@@ -849,76 +866,42 @@ static const struct file_operations i915_error_state_fops = {
        .release = i915_error_state_release,
 };
 
-static ssize_t
-i915_next_seqno_read(struct file *filp,
-                char __user *ubuf,
-                size_t max,
-                loff_t *ppos)
+static int
+i915_next_seqno_get(void *data, u64 *val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       char buf[80];
-       int len;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
 
-       len = snprintf(buf, sizeof(buf),
-                      "next_seqno :  0x%x\n",
-                      dev_priv->next_seqno);
-
+       *val = dev_priv->next_seqno;
        mutex_unlock(&dev->struct_mutex);
 
-       if (len > sizeof(buf))
-               len = sizeof(buf);
-
-       return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+       return 0;
 }
 
-static ssize_t
-i915_next_seqno_write(struct file *filp,
-                     const char __user *ubuf,
-                     size_t cnt,
-                     loff_t *ppos)
-{
-       struct drm_device *dev = filp->private_data;
-       char buf[20];
-       u32 val = 1;
+static int
+i915_next_seqno_set(void *data, u64 val)
+{
+       struct drm_device *dev = data;
        int ret;
 
-       if (cnt > 0) {
-               if (cnt > sizeof(buf) - 1)
-                       return -EINVAL;
-
-               if (copy_from_user(buf, ubuf, cnt))
-                       return -EFAULT;
-               buf[cnt] = 0;
-
-               ret = kstrtouint(buf, 0, &val);
-               if (ret < 0)
-                       return ret;
-       }
-
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
 
        ret = i915_gem_set_seqno(dev, val);
-
        mutex_unlock(&dev->struct_mutex);
 
-       return ret ?: cnt;
+       return ret;
 }
 
-static const struct file_operations i915_next_seqno_fops = {
-       .owner = THIS_MODULE,
-       .open = simple_open,
-       .read = i915_next_seqno_read,
-       .write = i915_next_seqno_write,
-       .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
+                       i915_next_seqno_get, i915_next_seqno_set,
+                       "next_seqno :  0x%llx\n");
 
 static int i915_rstdby_delays(struct seq_file *m, void *unused)
 {
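
The next_seqno conversion above follows a general debugfs pattern: replace hand-rolled read/write file_operations with a get/set pair wrapped by DEFINE_SIMPLE_ATTRIBUTE. Below is a minimal kernel-module sketch of that pattern, using a hypothetical "example_value" attribute that is not part of this patch:

    #include <linux/debugfs.h>
    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/module.h>

    static struct dentry *example_file;
    static u64 example_value;

    static int example_get(void *data, u64 *val)
    {
            *val = example_value;
            return 0;
    }

    static int example_set(void *data, u64 val)
    {
            example_value = val;
            return 0;
    }
    /* Generates example_fops with open/read/write/llseek handlers for us. */
    DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "0x%08llx\n");

    static int __init example_init(void)
    {
            example_file = debugfs_create_file("example_value", 0644, NULL, NULL,
                                               &example_fops);
            return example_file ? 0 : -ENOMEM;
    }

    static void __exit example_exit(void)
    {
            debugfs_remove(example_file);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

The generated handlers format the value on read and parse user input on write, which is what the open-coded snprintf()/copy_from_user() pairs removed throughout this file did by hand.
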
@@ -1680,105 +1663,51 @@ static int i915_dpio_info(struct seq_file *m, void *data)
        return 0;
 }
 
-static ssize_t
-i915_wedged_read(struct file *filp,
-                char __user *ubuf,
-                size_t max,
-                loff_t *ppos)
+static int
+i915_wedged_get(void *data, u64 *val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       char buf[80];
-       int len;
-
-       len = snprintf(buf, sizeof(buf),
-                      "wedged :  %d\n",
-                      atomic_read(&dev_priv->gpu_error.reset_counter));
 
-       if (len > sizeof(buf))
-               len = sizeof(buf);
+       *val = atomic_read(&dev_priv->gpu_error.reset_counter);
 
-       return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+       return 0;
 }
 
-static ssize_t
-i915_wedged_write(struct file *filp,
-                 const char __user *ubuf,
-                 size_t cnt,
-                 loff_t *ppos)
+static int
+i915_wedged_set(void *data, u64 val)
 {
-       struct drm_device *dev = filp->private_data;
-       char buf[20];
-       int val = 1;
-
-       if (cnt > 0) {
-               if (cnt > sizeof(buf) - 1)
-                       return -EINVAL;
-
-               if (copy_from_user(buf, ubuf, cnt))
-                       return -EFAULT;
-               buf[cnt] = 0;
+       struct drm_device *dev = data;
 
-               val = simple_strtoul(buf, NULL, 0);
-       }
-
-       DRM_INFO("Manually setting wedged to %d\n", val);
+       DRM_INFO("Manually setting wedged to %llu\n", val);
        i915_handle_error(dev, val);
 
-       return cnt;
+       return 0;
 }
 
-static const struct file_operations i915_wedged_fops = {
-       .owner = THIS_MODULE,
-       .open = simple_open,
-       .read = i915_wedged_read,
-       .write = i915_wedged_write,
-       .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
+                       i915_wedged_get, i915_wedged_set,
+                       "wedged :  %llu\n");
 
-static ssize_t
-i915_ring_stop_read(struct file *filp,
-                   char __user *ubuf,
-                   size_t max,
-                   loff_t *ppos)
+static int
+i915_ring_stop_get(void *data, u64 *val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       char buf[20];
-       int len;
-
-       len = snprintf(buf, sizeof(buf),
-                      "0x%08x\n", dev_priv->gpu_error.stop_rings);
 
-       if (len > sizeof(buf))
-               len = sizeof(buf);
+       *val = dev_priv->gpu_error.stop_rings;
 
-       return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+       return 0;
 }
 
-static ssize_t
-i915_ring_stop_write(struct file *filp,
-                    const char __user *ubuf,
-                    size_t cnt,
-                    loff_t *ppos)
+static int
+i915_ring_stop_set(void *data, u64 val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       char buf[20];
-       int val = 0, ret;
-
-       if (cnt > 0) {
-               if (cnt > sizeof(buf) - 1)
-                       return -EINVAL;
-
-               if (copy_from_user(buf, ubuf, cnt))
-                       return -EFAULT;
-               buf[cnt] = 0;
-
-               val = simple_strtoul(buf, NULL, 0);
-       }
+       int ret;
 
-       DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
+       DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
@@ -1787,16 +1716,12 @@ i915_ring_stop_write(struct file *filp,
        dev_priv->gpu_error.stop_rings = val;
        mutex_unlock(&dev->struct_mutex);
 
-       return cnt;
+       return 0;
 }
 
-static const struct file_operations i915_ring_stop_fops = {
-       .owner = THIS_MODULE,
-       .open = simple_open,
-       .read = i915_ring_stop_read,
-       .write = i915_ring_stop_write,
-       .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
+                       i915_ring_stop_get, i915_ring_stop_set,
+                       "0x%08llx\n");
 
 #define DROP_UNBOUND 0x1
 #define DROP_BOUND 0x2
@@ -1806,46 +1731,23 @@ static const struct file_operations i915_ring_stop_fops = {
                  DROP_BOUND | \
                  DROP_RETIRE | \
                  DROP_ACTIVE)
-static ssize_t
-i915_drop_caches_read(struct file *filp,
-                     char __user *ubuf,
-                     size_t max,
-                     loff_t *ppos)
+static int
+i915_drop_caches_get(void *data, u64 *val)
 {
-       char buf[20];
-       int len;
-
-       len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
-       if (len > sizeof(buf))
-               len = sizeof(buf);
+       *val = DROP_ALL;
 
-       return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+       return 0;
 }
 
-static ssize_t
-i915_drop_caches_write(struct file *filp,
-                      const char __user *ubuf,
-                      size_t cnt,
-                      loff_t *ppos)
+static int
+i915_drop_caches_set(void *data, u64 val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;
-       char buf[20];
-       int val = 0, ret;
-
-       if (cnt > 0) {
-               if (cnt > sizeof(buf) - 1)
-                       return -EINVAL;
-
-               if (copy_from_user(buf, ubuf, cnt))
-                       return -EFAULT;
-               buf[cnt] = 0;
-
-               val = simple_strtoul(buf, NULL, 0);
-       }
+       int ret;
 
-       DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
+       DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
 
        /* No need to check and wait for gpu resets, only libdrm auto-restarts
         * on ioctls on -EAGAIN. */
@@ -1883,27 +1785,19 @@ i915_drop_caches_write(struct file *filp,
 unlock:
        mutex_unlock(&dev->struct_mutex);
 
-       return ret ?: cnt;
+       return ret;
 }
 
-static const struct file_operations i915_drop_caches_fops = {
-       .owner = THIS_MODULE,
-       .open = simple_open,
-       .read = i915_drop_caches_read,
-       .write = i915_drop_caches_write,
-       .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
+                       i915_drop_caches_get, i915_drop_caches_set,
+                       "0x%08llx\n");
 
-static ssize_t
-i915_max_freq_read(struct file *filp,
-                  char __user *ubuf,
-                  size_t max,
-                  loff_t *ppos)
+static int
+i915_max_freq_get(void *data, u64 *val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       char buf[80];
-       int len, ret;
+       int ret;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
@@ -1912,42 +1806,23 @@ i915_max_freq_read(struct file *filp,
        if (ret)
                return ret;
 
-       len = snprintf(buf, sizeof(buf),
-                      "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
+       *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       if (len > sizeof(buf))
-               len = sizeof(buf);
-
-       return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+       return 0;
 }
 
-static ssize_t
-i915_max_freq_write(struct file *filp,
-                 const char __user *ubuf,
-                 size_t cnt,
-                 loff_t *ppos)
+static int
+i915_max_freq_set(void *data, u64 val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       char buf[20];
-       int val = 1, ret;
+       int ret;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
-       if (cnt > 0) {
-               if (cnt > sizeof(buf) - 1)
-                       return -EINVAL;
-
-               if (copy_from_user(buf, ubuf, cnt))
-                       return -EFAULT;
-               buf[cnt] = 0;
-
-               val = simple_strtoul(buf, NULL, 0);
-       }
-
-       DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
+       DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
@@ -1956,30 +1831,24 @@ i915_max_freq_write(struct file *filp,
        /*
         * Turbo will still be enabled, but won't go above the set value.
         */
-       dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
-
-       gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+       do_div(val, GT_FREQUENCY_MULTIPLIER);
+       dev_priv->rps.max_delay = val;
+       gen6_set_rps(dev, val);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       return cnt;
+       return 0;
 }
 
-static const struct file_operations i915_max_freq_fops = {
-       .owner = THIS_MODULE,
-       .open = simple_open,
-       .read = i915_max_freq_read,
-       .write = i915_max_freq_write,
-       .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
+                       i915_max_freq_get, i915_max_freq_set,
+                       "max freq: %llu\n");
 
-static ssize_t
-i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
-                  loff_t *ppos)
+static int
+i915_min_freq_get(void *data, u64 *val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       char buf[80];
-       int len, ret;
+       int ret;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
@@ -1988,40 +1857,23 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
        if (ret)
                return ret;
 
-       len = snprintf(buf, sizeof(buf),
-                      "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
+       *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       if (len > sizeof(buf))
-               len = sizeof(buf);
-
-       return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+       return 0;
 }
 
-static ssize_t
-i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
-                   loff_t *ppos)
+static int
+i915_min_freq_set(void *data, u64 val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       char buf[20];
-       int val = 1, ret;
+       int ret;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
-       if (cnt > 0) {
-               if (cnt > sizeof(buf) - 1)
-                       return -EINVAL;
-
-               if (copy_from_user(buf, ubuf, cnt))
-                       return -EFAULT;
-               buf[cnt] = 0;
-
-               val = simple_strtoul(buf, NULL, 0);
-       }
-
-       DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
+       DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
@@ -2030,33 +1882,25 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
        /*
         * Turbo will still be enabled, but won't go below the set value.
         */
-       dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
-
-       gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+       do_div(val, GT_FREQUENCY_MULTIPLIER);
+       dev_priv->rps.min_delay = val;
+       gen6_set_rps(dev, val);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       return cnt;
+       return 0;
 }
 
-static const struct file_operations i915_min_freq_fops = {
-       .owner = THIS_MODULE,
-       .open = simple_open,
-       .read = i915_min_freq_read,
-       .write = i915_min_freq_write,
-       .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
+                       i915_min_freq_get, i915_min_freq_set,
+                       "min freq: %llu\n");
 
-static ssize_t
-i915_cache_sharing_read(struct file *filp,
-                  char __user *ubuf,
-                  size_t max,
-                  loff_t *ppos)
+static int
+i915_cache_sharing_get(void *data, u64 *val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       char buf[80];
        u32 snpcr;
-       int len, ret;
+       int ret;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
@@ -2068,46 +1912,25 @@ i915_cache_sharing_read(struct file *filp,
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        mutex_unlock(&dev_priv->dev->struct_mutex);
 
-       len = snprintf(buf, sizeof(buf),
-                      "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
-                      GEN6_MBC_SNPCR_SHIFT);
+       *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
-       if (len > sizeof(buf))
-               len = sizeof(buf);
-
-       return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+       return 0;
 }
 
-static ssize_t
-i915_cache_sharing_write(struct file *filp,
-                 const char __user *ubuf,
-                 size_t cnt,
-                 loff_t *ppos)
+static int
+i915_cache_sharing_set(void *data, u64 val)
 {
-       struct drm_device *dev = filp->private_data;
+       struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       char buf[20];
        u32 snpcr;
-       int val = 1;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
-       if (cnt > 0) {
-               if (cnt > sizeof(buf) - 1)
-                       return -EINVAL;
-
-               if (copy_from_user(buf, ubuf, cnt))
-                       return -EFAULT;
-               buf[cnt] = 0;
-
-               val = simple_strtoul(buf, NULL, 0);
-       }
-
-       if (val < 0 || val > 3)
+       if (val > 3)
                return -EINVAL;
 
-       DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
+       DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
 
        /* Update the cache sharing policy here as well */
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
@@ -2115,16 +1938,12 @@ i915_cache_sharing_write(struct file *filp,
        snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
        I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
 
-       return cnt;
+       return 0;
 }
 
-static const struct file_operations i915_cache_sharing_fops = {
-       .owner = THIS_MODULE,
-       .open = simple_open,
-       .read = i915_cache_sharing_read,
-       .write = i915_cache_sharing_write,
-       .llseek = default_llseek,
-};
+DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
+                       i915_cache_sharing_get, i915_cache_sharing_set,
+                       "%llu\n");
 
 /* As the drm_debugfs_init() routines are called before dev->dev_private is
  * allocated we need to hook into the minor for release. */
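
The debugfs hunks above replace the hand-rolled read/write file_operations with DEFINE_SIMPLE_ATTRIBUTE(), which generates the fops from a u64 get/set pair plus a printf format, and the frequency setters switch from '/' to do_div() because a plain 64-bit division would pull in libgcc helpers on 32-bit kernels. A minimal sketch of the same pattern, not taken from this patch (the example_* names are hypothetical):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <asm/div64.h>

static u64 example_value;

static int example_get(void *data, u64 *val)
{
        *val = example_value;
        return 0;
}

static int example_set(void *data, u64 val)
{
        /* do_div() divides the u64 in place, which is what the max/min
         * freq setters above use instead of the '/' operator. */
        do_div(val, 2);
        example_value = val;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "0x%08llx\n");

A matching debugfs_create_file("example", 0644, parent, NULL, &example_fops) call would expose the attribute; reads print example_value with the given format, and writes are parsed into a u64 and handed to example_set().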
index 4fa6beb14c7741bf42a1f30a766b855d3494c406..4be58e3b8e4fa46726a182bc7d10e86957ec9980 100644 (file)
@@ -1452,6 +1452,22 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 #undef DEV_INFO_SEP
 }
 
+/**
+ * intel_early_sanitize_regs - clean up BIOS state
+ * @dev: DRM device
+ *
+ * This function must be called before we do any I915_READ or I915_WRITE. Its
+ * purpose is to clean up any state left by the BIOS that may affect us when
+ * reading and/or writing registers.
+ */
+static void intel_early_sanitize_regs(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_HASWELL(dev))
+               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1542,6 +1558,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto put_gmch;
        }
 
+       intel_early_sanitize_regs(dev);
+
        aperture_size = dev_priv->gtt.mappable_end;
 
        dev_priv->gtt.mappable =
@@ -1612,14 +1630,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        mutex_init(&dev_priv->rps.hw_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
 
-       if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-               dev_priv->num_pipe = 3;
-       else if (IS_MOBILE(dev) || !IS_GEN2(dev))
-               dev_priv->num_pipe = 2;
-       else
-               dev_priv->num_pipe = 1;
+       dev_priv->num_plane = 1;
+       if (IS_VALLEYVIEW(dev))
+               dev_priv->num_plane = 2;
 
-       ret = drm_vblank_init(dev, dev_priv->num_pipe);
+       ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
        if (ret)
                goto out_gem_unload;
 
index e9b57893db2b1ebba1348a71e37c845f98c3286b..3b4b9c09a20bef47ebcda65377fad9d33d8b9dc8 100644 (file)
@@ -121,9 +121,7 @@ MODULE_PARM_DESC(i915_enable_ppgtt,
 unsigned int i915_preliminary_hw_support __read_mostly = 0;
 module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
-               "Enable preliminary hardware support. "
-               "Enable Haswell and ValleyView Support. "
-               "(default: false)");
+               "Enable preliminary hardware support. (default: false)");
 
 int i915_disable_power_well __read_mostly = 0;
 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
@@ -143,74 +141,74 @@ extern int intel_agp_enabled;
        .driver_data = (unsigned long) info }
 
 static const struct intel_device_info intel_i830_info = {
-       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_845g_info = {
-       .gen = 2,
+       .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-       .gen = 2, .is_i85x = 1, .is_mobile = 1,
+       .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-       .gen = 2,
+       .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i915g_info = {
-       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-       .gen = 3, .is_mobile = 1,
+       .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-       .gen = 3, .is_i945gm = 1, .is_mobile = 1,
+       .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
 };
 
 static const struct intel_device_info intel_i965g_info = {
-       .gen = 4, .is_broadwater = 1,
+       .gen = 4, .is_broadwater = 1, .num_pipes = 2,
        .has_hotplug = 1,
        .has_overlay = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-       .gen = 4, .is_crestline = 1,
+       .gen = 4, .is_crestline = 1, .num_pipes = 2,
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .supports_tv = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-       .gen = 3, .is_g33 = 1,
+       .gen = 3, .is_g33 = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
 };
 
 static const struct intel_device_info intel_g45_info = {
-       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
+       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-       .gen = 4, .is_g4x = 1,
+       .gen = 4, .is_g4x = 1, .num_pipes = 2,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
@@ -218,26 +216,26 @@ static const struct intel_device_info intel_gm45_info = {
 };
 
 static const struct intel_device_info intel_pineview_info = {
-       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
-       .gen = 5,
+       .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-       .gen = 5, .is_mobile = 1,
+       .gen = 5, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
-       .gen = 6,
+       .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
@@ -246,7 +244,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-       .gen = 6, .is_mobile = 1,
+       .gen = 6, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .has_bsd_ring = 1,
@@ -255,61 +253,49 @@ static const struct intel_device_info intel_sandybridge_m_info = {
        .has_force_wake = 1,
 };
 
+#define GEN7_FEATURES  \
+       .gen = 7, .num_pipes = 3, \
+       .need_gfx_hws = 1, .has_hotplug = 1, \
+       .has_bsd_ring = 1, \
+       .has_blt_ring = 1, \
+       .has_llc = 1, \
+       .has_force_wake = 1
+
 static const struct intel_device_info intel_ivybridge_d_info = {
-       .is_ivybridge = 1, .gen = 7,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_bsd_ring = 1,
-       .has_blt_ring = 1,
-       .has_llc = 1,
-       .has_force_wake = 1,
+       GEN7_FEATURES,
+       .is_ivybridge = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
-       .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 0,   /* FBC is not enabled on Ivybridge mobile yet */
-       .has_bsd_ring = 1,
-       .has_blt_ring = 1,
-       .has_llc = 1,
-       .has_force_wake = 1,
+       GEN7_FEATURES,
+       .is_ivybridge = 1,
+       .is_mobile = 1,
 };
 
 static const struct intel_device_info intel_valleyview_m_info = {
-       .gen = 7, .is_mobile = 1,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 0,
-       .has_bsd_ring = 1,
-       .has_blt_ring = 1,
+       GEN7_FEATURES,
+       .is_mobile = 1,
+       .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
 };
 
 static const struct intel_device_info intel_valleyview_d_info = {
-       .gen = 7,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 0,
-       .has_bsd_ring = 1,
-       .has_blt_ring = 1,
+       GEN7_FEATURES,
+       .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
 };
 
 static const struct intel_device_info intel_haswell_d_info = {
-       .is_haswell = 1, .gen = 7,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_bsd_ring = 1,
-       .has_blt_ring = 1,
-       .has_llc = 1,
-       .has_force_wake = 1,
+       GEN7_FEATURES,
+       .is_haswell = 1,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
-       .is_haswell = 1, .gen = 7, .is_mobile = 1,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_bsd_ring = 1,
-       .has_blt_ring = 1,
-       .has_llc = 1,
-       .has_force_wake = 1,
+       GEN7_FEATURES,
+       .is_haswell = 1,
+       .is_mobile = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
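
The GEN7_FEATURES macro above leans on a property of C designated initializers: when the same member is initialized more than once, the last initializer takes effect, so a device entry can pull in the shared block and then override individual fields (the valleyview entries override .num_pipes this way). A self-contained sketch of the pattern, not taken from the patch:

struct example_info {
        unsigned int gen;
        unsigned int num_pipes;
        unsigned int is_mobile:1;
};

#define EXAMPLE_GEN7_FEATURES \
        .gen = 7, .num_pipes = 3

static const struct example_info example_mobile_info = {
        EXAMPLE_GEN7_FEATURES,
        .is_mobile = 1,
        .num_pipes = 2, /* overrides the value supplied by the macro */
};

gcc accepts the duplicate initializer (it may warn under -Woverride-init), and the final value of num_pipes here is 2.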
@@ -394,6 +380,9 @@ static const struct pci_device_id pciidlist[] = {           /* aka */
        INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
        INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
        INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
+       INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
+       INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
+       INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
        {0, 0, 0}
@@ -474,6 +463,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 static int i915_drm_freeze(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
 
        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
@@ -497,10 +487,14 @@ static int i915_drm_freeze(struct drm_device *dev)
 
                cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 
-               intel_modeset_disable(dev);
-
                drm_irq_uninstall(dev);
                dev_priv->enable_hotplug_processing = false;
+               /*
+                * Disable CRTCs directly since we want to preserve sw state
+                * for _thaw.
+                */
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+                       dev_priv->display.crtc_disable(crtc);
        }
 
        i915_save_state(dev);
@@ -556,6 +550,24 @@ void intel_console_resume(struct work_struct *work)
        console_unlock();
 }
 
+static void intel_resume_hotplug(struct drm_device *dev)
+{
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
+
+       mutex_lock(&mode_config->mutex);
+       DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+               if (encoder->hot_plug)
+                       encoder->hot_plug(encoder);
+
+       mutex_unlock(&mode_config->mutex);
+
+       /* Just fire off a uevent and let userspace tell us what to do */
+       drm_helper_hpd_irq_event(dev);
+}
+
 static int __i915_drm_thaw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -578,7 +590,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
                drm_irq_install(dev);
 
                intel_modeset_init_hw(dev);
-               intel_modeset_setup_hw_state(dev, false);
+
+               drm_modeset_lock_all(dev);
+               intel_modeset_setup_hw_state(dev, true);
+               drm_modeset_unlock_all(dev);
 
                /*
                 * ... but also need to make sure that hotplug processing
@@ -588,6 +603,8 @@ static int __i915_drm_thaw(struct drm_device *dev)
                 * */
                intel_hpd_init(dev);
                dev_priv->enable_hotplug_processing = true;
+               /* Config may have changed between suspend and resume */
+               intel_resume_hotplug(dev);
        }
 
        intel_opregion_init(dev);
@@ -732,6 +749,7 @@ static int ironlake_do_reset(struct drm_device *dev)
        int ret;
 
        gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+       gdrst &= ~GRDOM_MASK;
        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
@@ -740,6 +758,7 @@ static int ironlake_do_reset(struct drm_device *dev)
 
        /* We can't reset render&media without also resetting display ... */
        gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+       gdrst &= ~GRDOM_MASK;
        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
        return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
@@ -803,7 +822,7 @@ int intel_gpu_reset(struct drm_device *dev)
 
        /* Also reset the gpu hangman. */
        if (dev_priv->gpu_error.stop_rings) {
-               DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
+               DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
                dev_priv->gpu_error.stop_rings = 0;
                if (ret == -ENODEV) {
                        DRM_ERROR("Reset not implemented, but ignoring "
@@ -1147,6 +1166,27 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
        I915_WRITE_NOTRACE(MI_MODE, 0);
 }
 
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+       if (IS_HASWELL(dev_priv->dev) &&
+           (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+               DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+                         reg);
+               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       }
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+       if (IS_HASWELL(dev_priv->dev) &&
+           (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+               DRM_ERROR("Unclaimed write to %x\n", reg);
+               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       }
+}
+
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
        u##x val = 0; \
@@ -1183,18 +1223,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
        } \
        if (IS_GEN5(dev_priv->dev)) \
                ilk_dummy_write(dev_priv); \
-       if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
-               DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
-               I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
-       } \
+       hsw_unclaimed_reg_clear(dev_priv, reg); \
        write##y(val, dev_priv->regs + reg); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
-       if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
-               DRM_ERROR("Unclaimed write to %x\n", reg); \
-               writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT);  \
-       } \
+       hsw_unclaimed_reg_check(dev_priv, reg); \
 }
 __i915_write(8, b)
 __i915_write(16, w)
index 01769e2a99538e9e99cd191b328d7746439d328b..44fca0b69473596de2dc2dd6620cab672aae834b 100644 (file)
@@ -86,6 +86,19 @@ enum port {
 };
 #define port_name(p) ((p) + 'A')
 
+enum hpd_pin {
+       HPD_NONE = 0,
+       HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
+       HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
+       HPD_CRT,
+       HPD_SDVO_B,
+       HPD_SDVO_C,
+       HPD_PORT_B,
+       HPD_PORT_C,
+       HPD_PORT_D,
+       HPD_NUM_PINS
+};
+
 #define I915_GEM_GPU_DOMAINS \
        (I915_GEM_DOMAIN_RENDER | \
         I915_GEM_DOMAIN_SAMPLER | \
@@ -93,7 +106,7 @@ enum port {
         I915_GEM_DOMAIN_INSTRUCTION | \
         I915_GEM_DOMAIN_VERTEX)
 
-#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
 
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
        list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
@@ -243,7 +256,7 @@ struct drm_i915_error_state {
                        int page_count;
                        u32 gtt_offset;
                        u32 *pages[0];
-               } *ringbuffer, *batchbuffer;
+               } *ringbuffer, *batchbuffer, *ctx;
                struct drm_i915_error_request {
                        long jiffies;
                        u32 seqno;
@@ -271,6 +284,9 @@ struct drm_i915_error_state {
        struct intel_display_error_state *display;
 };
 
+struct intel_crtc_config;
+struct intel_crtc;
+
 struct drm_i915_display_funcs {
        bool (*fbc_enabled)(struct drm_device *dev);
        void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
@@ -283,9 +299,11 @@ struct drm_i915_display_funcs {
        void (*update_linetime_wm)(struct drm_device *dev, int pipe,
                                 struct drm_display_mode *mode);
        void (*modeset_global_resources)(struct drm_device *dev);
+       /* Returns the active state of the crtc, and if the crtc is active,
+        * fills out the pipe-config with the hw state. */
+       bool (*get_pipe_config)(struct intel_crtc *,
+                               struct intel_crtc_config *);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
-                            struct drm_display_mode *mode,
-                            struct drm_display_mode *adjusted_mode,
                             int x, int y,
                             struct drm_framebuffer *old_fb);
        void (*crtc_enable)(struct drm_crtc *crtc);
@@ -341,6 +359,7 @@ struct drm_i915_gt_funcs {
 
 struct intel_device_info {
        u32 display_mmio_offset;
+       u8 num_pipes:3;
        u8 gen;
        u8 is_mobile:1;
        u8 is_i85x:1;
@@ -905,16 +924,14 @@ typedef struct drm_i915_private {
        struct mutex dpio_lock;
 
        /** Cached value of IMR to avoid reads in updating the bitfield */
-       u32 pipestat[2];
        u32 irq_mask;
        u32 gt_irq_mask;
 
-       u32 hotplug_supported_mask;
        struct work_struct hotplug_work;
        bool enable_hotplug_processing;
 
-       int num_pipe;
        int num_pch_pll;
+       int num_plane;
 
        unsigned long cfb_size;
        unsigned int cfb_fb;
@@ -928,9 +945,14 @@ typedef struct drm_i915_private {
        struct intel_overlay *overlay;
        unsigned int sprite_scaling_enabled;
 
+       /* backlight */
+       struct {
+               int level;
+               bool enabled;
+               struct backlight_device *device;
+       } backlight;
+
        /* LVDS info */
-       int backlight_level;  /* restore backlight to this value */
-       bool backlight_enabled;
        struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
        struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 
@@ -1032,8 +1054,6 @@ typedef struct drm_i915_private {
         */
        struct work_struct console_resume_work;
 
-       struct backlight_device *backlight;
-
        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;
 
@@ -1340,6 +1360,7 @@ struct drm_i915_file_private {
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
 #define HAS_DDI(dev)           (IS_HASWELL(dev))
+#define HAS_POWER_WELL(dev)    (IS_HASWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK               0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE           0x3b00
@@ -1529,17 +1550,12 @@ void i915_gem_lastclose(struct drm_device *dev);
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
-       struct scatterlist *sg = obj->pages->sgl;
-       int nents = obj->pages->nents;
-       while (nents > SG_MAX_SINGLE_ALLOC) {
-               if (n < SG_MAX_SINGLE_ALLOC - 1)
-                       break;
-
-               sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
-               n -= SG_MAX_SINGLE_ALLOC - 1;
-               nents -= SG_MAX_SINGLE_ALLOC - 1;
-       }
-       return sg_page(sg+n);
+       struct sg_page_iter sg_iter;
+
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
+               return sg_page_iter_page(&sg_iter);
+
+       return NULL;
 }
 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
@@ -1718,6 +1734,11 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+                                              u32 stolen_offset,
+                                              u32 gtt_offset,
+                                              u32 size);
 void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
 /* i915_gem_tiling.c */
@@ -1848,6 +1869,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
+int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 
 #define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
@@ -1901,4 +1924,9 @@ static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
                return VGACNTRL;
 }
 
+static inline void __user *to_user_ptr(u64 address)
+{
+       return (void __user *)(uintptr_t)address;
+}
+
 #endif
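
The to_user_ptr() helper above centralises the u64 -> uintptr_t -> void __user * cast used throughout the GEM ioctls: user pointers travel through the ioctl structs as fixed-size u64 fields so 32- and 64-bit userspace share one layout, and going through uintptr_t avoids the int-to-pointer size warning on 32-bit builds. A usage sketch under those assumptions (example_args and example_copy_in are hypothetical; the helper body matches the one added above):

#include <linux/types.h>
#include <linux/uaccess.h>

struct example_args {
        __u64 data_ptr; /* user pointer carried as a fixed-size integer */
        __u32 size;
};

static inline void __user *to_user_ptr(u64 address)
{
        return (void __user *)(uintptr_t)address;
}

static int example_copy_in(void *dst, const struct example_args *args)
{
        /* copy_from_user() wants a __user pointer, not a raw u64 */
        if (copy_from_user(dst, to_user_ptr(args->data_ptr), args->size))
                return -EFAULT;
        return 0;
}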
index 0e207e6e0df8da27513ac3f14e90050532b557d0..911bd40ef5132949f49e0c9f15d569577b18c372 100644 (file)
@@ -411,10 +411,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int prefaulted = 0;
        int needs_clflush = 0;
-       struct scatterlist *sg;
-       int i;
+       struct sg_page_iter sg_iter;
 
-       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       user_data = to_user_ptr(args->data_ptr);
        remain = args->size;
 
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -441,11 +440,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
        offset = args->offset;
 
-       for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-               struct page *page;
-
-               if (i < offset >> PAGE_SHIFT)
-                       continue;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+                        offset >> PAGE_SHIFT) {
+               struct page *page = sg_page_iter_page(&sg_iter);
 
                if (remain <= 0)
                        break;
@@ -460,7 +457,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
 
-               page = sg_page(sg);
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;
 
@@ -522,7 +518,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                return 0;
 
        if (!access_ok(VERIFY_WRITE,
-                      (char __user *)(uintptr_t)args->data_ptr,
+                      to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;
 
@@ -613,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        if (ret)
                goto out_unpin;
 
-       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       user_data = to_user_ptr(args->data_ptr);
        remain = args->size;
 
        offset = obj->gtt_offset + args->offset;
@@ -732,10 +728,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
        int hit_slowpath = 0;
        int needs_clflush_after = 0;
        int needs_clflush_before = 0;
-       int i;
-       struct scatterlist *sg;
+       struct sg_page_iter sg_iter;
 
-       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       user_data = to_user_ptr(args->data_ptr);
        remain = args->size;
 
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -768,13 +763,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
        offset = args->offset;
        obj->dirty = 1;
 
-       for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-               struct page *page;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+                        offset >> PAGE_SHIFT) {
+               struct page *page = sg_page_iter_page(&sg_iter);
                int partial_cacheline_write;
 
-               if (i < offset >> PAGE_SHIFT)
-                       continue;
-
                if (remain <= 0)
                        break;
 
@@ -796,7 +789,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                        ((shmem_page_offset | page_length)
                                & (boot_cpu_data.x86_clflush_size - 1));
 
-               page = sg_page(sg);
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;
 
@@ -867,11 +859,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                return 0;
 
        if (!access_ok(VERIFY_READ,
-                      (char __user *)(uintptr_t)args->data_ptr,
+                      to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;
 
-       ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
+       ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
                                           args->size);
        if (ret)
                return -EFAULT;
@@ -1633,9 +1625,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-       int page_count = obj->base.size / PAGE_SIZE;
-       struct scatterlist *sg;
-       int ret, i;
+       struct sg_page_iter sg_iter;
+       int ret;
 
        BUG_ON(obj->madv == __I915_MADV_PURGED);
 
@@ -1655,8 +1646,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        if (obj->madv == I915_MADV_DONTNEED)
                obj->dirty = 0;
 
-       for_each_sg(obj->pages->sgl, sg, page_count, i) {
-               struct page *page = sg_page(sg);
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+               struct page *page = sg_page_iter_page(&sg_iter);
 
                if (obj->dirty)
                        set_page_dirty(page);
@@ -1757,7 +1748,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
+       struct sg_page_iter sg_iter;
        struct page *page;
+       unsigned long last_pfn = 0;     /* suppress gcc warning */
        gfp_t gfp;
 
        /* Assert that the object is not currently in any GPU domain. As it
@@ -1787,7 +1780,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        gfp = mapping_gfp_mask(mapping);
        gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
        gfp &= ~(__GFP_IO | __GFP_WAIT);
-       for_each_sg(st->sgl, sg, page_count, i) {
+       sg = st->sgl;
+       st->nents = 0;
+       for (i = 0; i < page_count; i++) {
                page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                if (IS_ERR(page)) {
                        i915_gem_purge(dev_priv, page_count);
@@ -1810,9 +1805,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                        gfp &= ~(__GFP_IO | __GFP_WAIT);
                }
 
-               sg_set_page(sg, page, PAGE_SIZE, 0);
+               if (!i || page_to_pfn(page) != last_pfn + 1) {
+                       if (i)
+                               sg = sg_next(sg);
+                       st->nents++;
+                       sg_set_page(sg, page, PAGE_SIZE, 0);
+               } else {
+                       sg->length += PAGE_SIZE;
+               }
+               last_pfn = page_to_pfn(page);
        }
 
+       sg_mark_end(sg);
        obj->pages = st;
 
        if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -1821,8 +1825,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        return 0;
 
 err_pages:
-       for_each_sg(st->sgl, sg, i, page_count)
-               page_cache_release(sg_page(sg));
+       sg_mark_end(sg);
+       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+               page_cache_release(sg_page_iter_page(&sg_iter));
        sg_free_table(st);
        kfree(st);
        return PTR_ERR(page);
@@ -2123,11 +2128,11 @@ static void i915_gem_reset_fences(struct drm_device *dev)
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 
-               i915_gem_write_fence(dev, i, NULL);
-
                if (reg->obj)
                        i915_gem_object_fence_lost(reg->obj);
 
+               i915_gem_write_fence(dev, i, NULL);
+
                reg->pin_count = 0;
                reg->obj = NULL;
                INIT_LIST_HEAD(&reg->lru_list);
@@ -2717,6 +2722,7 @@ int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_fence_reg *fence;
        int ret;
 
        ret = i915_gem_object_wait_fence(obj);
@@ -2726,10 +2732,10 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
        if (obj->fence_reg == I915_FENCE_REG_NONE)
                return 0;
 
-       i915_gem_object_update_fence(obj,
-                                    &dev_priv->fence_regs[obj->fence_reg],
-                                    false);
+       fence = &dev_priv->fence_regs[obj->fence_reg];
+
        i915_gem_object_fence_lost(obj);
+       i915_gem_object_update_fence(obj, fence, false);
 
        return 0;
 }
@@ -4010,7 +4016,16 @@ int i915_gem_init(struct drm_device *dev)
        int ret;
 
        mutex_lock(&dev->struct_mutex);
+
+       if (IS_VALLEYVIEW(dev)) {
+               /* VLVA0 (potential hack), BIOS isn't actually waking us */
+               I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
+               if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
+                       DRM_DEBUG_DRIVER("allow wake ack timed out\n");
+       }
+
        i915_gem_init_global_gtt(dev);
+
        ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
@@ -4327,7 +4342,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
                     struct drm_file *file_priv)
 {
        void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-       char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
+       char __user *user_data = to_user_ptr(args->data_ptr);
 
        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;
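
Most of the i915_gem.c hunks above replace open-coded scatterlist walks (for_each_sg() plus manual page and offset bookkeeping) with the sg_page_iter API, which yields one page per iteration even when a single scatterlist entry now covers several contiguous pages; the GTT hunks further down use sg_page_iter_dma_address() the same way. A minimal sketch of the iteration pattern, not taken from the patch (example_touch_pages and its argument are hypothetical):

#include <linux/scatterlist.h>
#include <linux/highmem.h>

static void example_touch_pages(struct sg_table *table)
{
        struct sg_page_iter sg_iter;

        /* Visits one PAGE_SIZE chunk per iteration, regardless of how
         * the entries were coalesced when the table was built. */
        for_each_sg_page(table->sgl, &sg_iter, table->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);
                void *vaddr = kmap_atomic(page);

                /* ... operate on the page ... */
                kunmap_atomic(vaddr);
        }
}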
index 6a5af6828624e257d4b2516ade02ce79eaf4d839..c6dfc1466e3a1ec2c775ea1b178e3dbce20858e7 100644 (file)
@@ -62,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
        src = obj->pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->pages->nents; i++) {
-               sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+               sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }
@@ -105,7 +105,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;
-       struct scatterlist *sg;
+       struct sg_page_iter sg_iter;
        struct page **pages;
        int ret, i;
 
@@ -124,14 +124,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
        ret = -ENOMEM;
 
-       pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
        if (pages == NULL)
                goto error;
 
-       for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
-               pages[i] = sg_page(sg);
+       i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+               pages[i++] = sg_page_iter_page(&sg_iter);
 
-       obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+       obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
        drm_free_large(pages);
 
        if (!obj->dma_buf_vmapping)
index 3b11ab0fbc960ab1ff58fd842a2d51ff10ca15db..117ce38136812d1689ecc4b65ffe743a5e86a71c 100644 (file)
@@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
        if (eb == NULL) {
                int size = args->buffer_count;
                int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
-               BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
+               BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
                while (count > 2*size)
                        count >>= 1;
                eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -305,7 +305,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
        struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
        int remain, ret;
 
-       user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+       user_relocs = to_user_ptr(entry->relocs_ptr);
 
        remain = entry->relocation_count;
        while (remain) {
@@ -359,8 +359,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate(struct drm_device *dev,
-                            struct eb_objects *eb)
+i915_gem_execbuffer_relocate(struct eb_objects *eb)
 {
        struct drm_i915_gem_object *obj;
        int ret = 0;
@@ -475,7 +474,6 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
-                           struct drm_file *file,
                            struct list_head *objects,
                            bool *need_relocs)
 {
@@ -618,7 +616,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                u64 invalid_offset = (u64)-1;
                int j;
 
-               user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+               user_relocs = to_user_ptr(exec[i].relocs_ptr);
 
                if (copy_from_user(reloc+total, user_relocs,
                                   exec[i].relocation_count * sizeof(*reloc))) {
@@ -663,7 +661,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                goto err;
 
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
        if (ret)
                goto err;
 
@@ -736,7 +734,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
        int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 
        for (i = 0; i < count; i++) {
-               char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+               char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
                int length; /* limited by fault_in_pages_readable() */
 
                if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
@@ -752,7 +750,11 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
                length = exec[i].relocation_count *
                        sizeof(struct drm_i915_gem_relocation_entry);
-               /* we may also need to update the presumed offsets */
+               /*
+                * We must check that the entire relocation array is safe
+                * to read, but since we may need to update the presumed
+                * offsets during execution, check for full write access.
+                */
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;
 
@@ -949,9 +951,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                }
 
                if (copy_from_user(cliprects,
-                                    (struct drm_clip_rect __user *)(uintptr_t)
-                                    args->cliprects_ptr,
-                                    sizeof(*cliprects)*args->num_cliprects)) {
+                                  to_user_ptr(args->cliprects_ptr),
+                                  sizeof(*cliprects)*args->num_cliprects)) {
                        ret = -EFAULT;
                        goto pre_mutex_err;
                }
@@ -986,13 +987,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
        if (ret)
                goto err;
 
        /* The objects are in their final locations, apply the relocations. */
        if (need_relocs)
-               ret = i915_gem_execbuffer_relocate(dev, eb);
+               ret = i915_gem_execbuffer_relocate(eb);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
@@ -1115,7 +1116,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
-                            (void __user *)(uintptr_t)args->buffers_ptr,
+                            to_user_ptr(args->buffers_ptr),
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1154,7 +1155,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                for (i = 0; i < args->buffer_count; i++)
                        exec_list[i].offset = exec2_list[i].offset;
                /* ... and back out to userspace */
-               ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
+               ret = copy_to_user(to_user_ptr(args->buffers_ptr),
                                   exec_list,
                                   sizeof(*exec_list) * args->buffer_count);
                if (ret) {
@@ -1195,8 +1196,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                return -ENOMEM;
        }
        ret = copy_from_user(exec2_list,
-                            (struct drm_i915_relocation_entry __user *)
-                            (uintptr_t) args->buffers_ptr,
+                            to_user_ptr(args->buffers_ptr),
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1208,7 +1208,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
-               ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
+               ret = copy_to_user(to_user_ptr(args->buffers_ptr),
                                   exec2_list,
                                   sizeof(*exec2_list) * args->buffer_count);
                if (ret) {
index 926a1e2dd2349ea0aa5960bd4b0e089225631083..24a23b31b55fe188779d392276986065f2b7a975 100644 (file)
@@ -83,7 +83,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 {
        gtt_pte_t *pt_vaddr;
        gtt_pte_t scratch_pte;
-       unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+       unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
 
@@ -96,7 +96,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;
 
-               pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+               pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 
                for (i = first_pte; i < last_pte; i++)
                        pt_vaddr[i] = scratch_pte;
@@ -105,7 +105,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 
                num_entries -= last_pte - first_pte;
                first_pte = 0;
-               act_pd++;
+               act_pt++;
        }
 }
 
@@ -115,42 +115,26 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
                                      enum i915_cache_level cache_level)
 {
        gtt_pte_t *pt_vaddr;
-       unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
-       unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
-       unsigned i, j, m, segment_len;
-       dma_addr_t page_addr;
-       struct scatterlist *sg;
-
-       /* init sg walking */
-       sg = pages->sgl;
-       i = 0;
-       segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
-       m = 0;
-
-       while (i < pages->nents) {
-               pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
-               for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
-                       page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-                       pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
-                                                     cache_level);
-
-                       /* grab the next page */
-                       if (++m == segment_len) {
-                               if (++i == pages->nents)
-                                       break;
-
-                               sg = sg_next(sg);
-                               segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
-                               m = 0;
-                       }
-               }
-
-               kunmap_atomic(pt_vaddr);
+       unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
+       unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+       struct sg_page_iter sg_iter;
+
+       pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+       for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+               dma_addr_t page_addr;
+
+               page_addr = sg_page_iter_dma_address(&sg_iter);
+               pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
+                                                   cache_level);
+               if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+                       kunmap_atomic(pt_vaddr);
+                       act_pt++;
+                       pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+                       act_pte = 0;
 
-               first_pte = 0;
-               act_pd++;
+               }
        }
+       kunmap_atomic(pt_vaddr);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
@@ -432,21 +416,16 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
                                     enum i915_cache_level level)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct scatterlist *sg = st->sgl;
        gtt_pte_t __iomem *gtt_entries =
                (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
-       int unused, i = 0;
-       unsigned int len, m = 0;
+       int i = 0;
+       struct sg_page_iter sg_iter;
        dma_addr_t addr;
 
-       for_each_sg(st->sgl, sg, st->nents, unused) {
-               len = sg_dma_len(sg) >> PAGE_SHIFT;
-               for (m = 0; m < len; m++) {
-                       addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-                       iowrite32(gen6_pte_encode(dev, addr, level),
-                                 &gtt_entries[i]);
-                       i++;
-               }
+       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+               addr = sg_page_iter_dma_address(&sg_iter);
+               iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
+               i++;
        }
 
        /* XXX: This serves as a posting read to make sure that the PTE has
@@ -752,7 +731,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
-       if (IS_GEN7(dev))
+       if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
                *stolen = gen7_get_stolen_size(snb_gmch_ctl);
        else
                *stolen = gen6_get_stolen_size(snb_gmch_ctl);
index 69d97cbac13c5754344355e602c64ad75b768a24..130d1db27e288d0479804696d33e1014e1581a18 100644 (file)
@@ -312,6 +312,71 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
        return NULL;
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+                                              u32 stolen_offset,
+                                              u32 gtt_offset,
+                                              u32 size)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj;
+       struct drm_mm_node *stolen;
+
+       if (dev_priv->mm.stolen_base == 0)
+               return NULL;
+
+       DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
+                       stolen_offset, gtt_offset, size);
+
+       /* KISS and expect everything to be page-aligned */
+       BUG_ON(stolen_offset & 4095);
+       BUG_ON(gtt_offset & 4095);
+       BUG_ON(size & 4095);
+
+       if (WARN_ON(size == 0))
+               return NULL;
+
+       stolen = drm_mm_create_block(&dev_priv->mm.stolen,
+                                    stolen_offset, size,
+                                    false);
+       if (stolen == NULL) {
+               DRM_DEBUG_KMS("failed to allocate stolen space\n");
+               return NULL;
+       }
+
+       obj = _i915_gem_object_create_stolen(dev, stolen);
+       if (obj == NULL) {
+               DRM_DEBUG_KMS("failed to allocate stolen object\n");
+               drm_mm_put_block(stolen);
+               return NULL;
+       }
+
+       /* To simplify the initialisation sequence between KMS and GTT,
+        * we allow construction of the stolen object prior to
+        * setting up the GTT space. The actual reservation will occur
+        * later.
+        */
+       if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
+               obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+                                                    gtt_offset, size,
+                                                    false);
+               if (obj->gtt_space == NULL) {
+                       DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+                       drm_gem_object_unreference(&obj->base);
+                       return NULL;
+               }
+       } else
+               obj->gtt_space = I915_GTT_RESERVED;
+
+       obj->gtt_offset = gtt_offset;
+       obj->has_global_gtt_mapping = 1;
+
+       list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
+       list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+       return obj;
+}
+
 void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
index abcba2f5a788b4ad3727309de95548851513120a..c807eb93755b7f00ac3f785e251684ecaf25fed6 100644 (file)
@@ -473,28 +473,29 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-       struct scatterlist *sg;
-       int page_count = obj->base.size >> PAGE_SHIFT;
+       struct sg_page_iter sg_iter;
        int i;
 
        if (obj->bit_17 == NULL)
                return;
 
-       for_each_sg(obj->pages->sgl, sg, page_count, i) {
-               struct page *page = sg_page(sg);
+       i = 0;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+               struct page *page = sg_page_iter_page(&sg_iter);
                char new_bit_17 = page_to_phys(page) >> 17;
                if ((new_bit_17 & 0x1) !=
                    (test_bit(i, obj->bit_17) != 0)) {
                        i915_gem_swizzle_page(page);
                        set_page_dirty(page);
                }
+               i++;
        }
 }
 
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-       struct scatterlist *sg;
+       struct sg_page_iter sg_iter;
        int page_count = obj->base.size >> PAGE_SHIFT;
        int i;
 
@@ -508,11 +509,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
                }
        }
 
-       for_each_sg(obj->pages->sgl, sg, page_count, i) {
-               struct page *page = sg_page(sg);
-               if (page_to_phys(page) & (1 << 17))
+       i = 0;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+               if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
                        __set_bit(i, obj->bit_17);
                else
                        __clear_bit(i, obj->bit_17);
+               i++;
        }
 }
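
Both conversions in this hunk follow the same idiom: for_each_sg_page() visits individual backing pages even when scatterlist entries were coalesced, so the page index that the bit_17 bookkeeping needs has to be carried manually. A generic sketch of that pattern (per_page_work() is a placeholder, not a real function):

	struct sg_page_iter sg_iter;
	int i = 0;

	/* walk every backing page once, tracking an explicit page index */
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		per_page_work(page, i);		/* placeholder per-page work */
		i++;
	}
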
index 3c7bb0410b517fe84ae696c720441d553f63cd7f..4c5bdd03738819f348c6fc8c441fdc9b50a5d819 100644 (file)
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+static const u32 hpd_ibx[] = {
+       [HPD_CRT] = SDE_CRT_HOTPLUG,
+       [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
+       [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
+       [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
+       [HPD_PORT_D] = SDE_PORTD_HOTPLUG
+};
+
+static const u32 hpd_cpt[] = {
+       [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
+       [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
+       [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+       [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+       [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
+};
+
+static const u32 hpd_mask_i915[] = {
+       [HPD_CRT] = CRT_HOTPLUG_INT_EN,
+       [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
+       [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
+       [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
+       [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
+       [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
+};
+
+static const u32 hpd_status_gen4[] = {
+       [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+       [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
+       [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
+       [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+       [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+       [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+static const u32 hpd_status_i965[] = {
+        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
+        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
+        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
+       [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+       [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
+       [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
+       [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+       [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+       [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+
+
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -47,7 +101,7 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }
 
-static inline void
+static void
 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        if ((dev_priv->irq_mask & mask) != mask) {
@@ -60,26 +114,30 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 {
-       if ((dev_priv->pipestat[pipe] & mask) != mask) {
-               u32 reg = PIPESTAT(pipe);
+       u32 reg = PIPESTAT(pipe);
+       u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
-               dev_priv->pipestat[pipe] |= mask;
-               /* Enable the interrupt, clear any pending status */
-               I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
-               POSTING_READ(reg);
-       }
+       if ((pipestat & mask) == mask)
+               return;
+
+       /* Enable the interrupt, clear any pending status */
+       pipestat |= mask | (mask >> 16);
+       I915_WRITE(reg, pipestat);
+       POSTING_READ(reg);
 }
 
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 {
-       if ((dev_priv->pipestat[pipe] & mask) != 0) {
-               u32 reg = PIPESTAT(pipe);
+       u32 reg = PIPESTAT(pipe);
+       u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
-               dev_priv->pipestat[pipe] &= ~mask;
-               I915_WRITE(reg, dev_priv->pipestat[pipe]);
-               POSTING_READ(reg);
-       }
+       if ((pipestat & mask) == 0)
+               return;
+
+       pipestat &= ~mask;
+       I915_WRITE(reg, pipestat);
+       POSTING_READ(reg);
 }
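
The rewrite reads PIPESTAT directly instead of trusting a cached copy: enable bits live in the high half of the register and each status bit sits exactly 16 positions below its enable, which is why the code masks with 0x7fff0000 to keep only the enables and writes mask >> 16 to ack pending status. A worked example, assuming the usual vblank bit pair from i915_reg.h:

	/* PIPE_VBLANK_INTERRUPT_ENABLE is bit 17; its status bit is 17 - 16 = 1 */
	u32 mask = PIPE_VBLANK_INTERRUPT_ENABLE;
	u32 pipestat = I915_READ(PIPESTAT(pipe)) & 0x7fff0000;	/* enables only */

	pipestat |= mask | (mask >> 16);	/* set enable bit 17, clear status bit 1 */
	I915_WRITE(PIPESTAT(pipe), pipestat);
	POSTING_READ(PIPESTAT(pipe));
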
 
 /**
@@ -250,10 +308,9 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              struct timeval *vblank_time,
                              unsigned flags)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
 
-       if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+       if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }
@@ -596,7 +653,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 
                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
-                       if (hotplug_status & dev_priv->hotplug_supported_mask)
+                       if (hotplug_status & HOTPLUG_INT_STATUS_I915)
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
 
@@ -937,6 +994,8 @@ static void i915_error_work_func(struct work_struct *work)
                for_each_ring(ring, dev_priv, i)
                        wake_up_all(&ring->irq_queue);
 
+               intel_display_handle_reset(dev);
+
                wake_up_all(&dev_priv->gpu_error.reset_queue);
        }
 }
@@ -972,24 +1031,23 @@ static void i915_get_extra_instdone(struct drm_device *dev,
 
 #ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *dev_priv,
-                        struct drm_i915_gem_object *src)
+i915_error_object_create_sized(struct drm_i915_private *dev_priv,
+                              struct drm_i915_gem_object *src,
+                              const int num_pages)
 {
        struct drm_i915_error_object *dst;
-       int i, count;
+       int i;
        u32 reloc_offset;
 
        if (src == NULL || src->pages == NULL)
                return NULL;
 
-       count = src->base.size / PAGE_SIZE;
-
-       dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
+       dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
        if (dst == NULL)
                return NULL;
 
        reloc_offset = src->gtt_offset;
-       for (i = 0; i < count; i++) {
+       for (i = 0; i < num_pages; i++) {
                unsigned long flags;
                void *d;
 
@@ -1039,7 +1097,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 
                reloc_offset += PAGE_SIZE;
        }
-       dst->page_count = count;
+       dst->page_count = num_pages;
        dst->gtt_offset = src->gtt_offset;
 
        return dst;
@@ -1050,6 +1108,9 @@ unwind:
        kfree(dst);
        return NULL;
 }
+#define i915_error_object_create(dev_priv, src) \
+       i915_error_object_create_sized((dev_priv), (src), \
+                                      (src)->base.size>>PAGE_SHIFT)
 
 static void
 i915_error_object_free(struct drm_i915_error_object *obj)
@@ -1256,6 +1317,26 @@ static void i915_record_ring_state(struct drm_device *dev,
        error->cpu_ring_tail[ring->id] = ring->tail;
 }
 
+
+static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+                                          struct drm_i915_error_state *error,
+                                          struct drm_i915_error_ring *ering)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_object *obj;
+
+       /* Currently render ring is the only HW context user */
+       if (ring->id != RCS || !error->ccid)
+               return;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+               if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
+                       ering->ctx = i915_error_object_create_sized(dev_priv,
+                                                                   obj, 1);
+               }
+       }
+}
+
 static void i915_gem_record_rings(struct drm_device *dev,
                                  struct drm_i915_error_state *error)
 {
@@ -1273,6 +1354,9 @@ static void i915_gem_record_rings(struct drm_device *dev,
                error->ring[i].ringbuffer =
                        i915_error_object_create(dev_priv, ring->obj);
 
+
+               i915_gem_record_active_context(ring, error, &error->ring[i]);
+
                count = 0;
                list_for_each_entry(request, &ring->request_list, list)
                        count++;
@@ -1328,14 +1412,15 @@ static void i915_capture_error_state(struct drm_device *dev)
                return;
        }
 
-       DRM_INFO("capturing error event; look for more information in"
+       DRM_INFO("capturing error event; look for more information in "
                 "/sys/kernel/debug/dri/%d/i915_error_state\n",
                 dev->primary->index);
 
        kref_init(&error->ref);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
-       error->ccid = I915_READ(CCID);
+       if (HAS_HW_CONTEXTS(dev))
+               error->ccid = I915_READ(CCID);
 
        if (HAS_PCH_SPLIT(dev))
                error->ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1356,8 +1441,9 @@ static void i915_capture_error_state(struct drm_device *dev)
        else if (INTEL_INFO(dev)->gen == 6)
                error->forcewake = I915_READ(FORCEWAKE);
 
-       for_each_pipe(pipe)
-               error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+       if (!HAS_PCH_SPLIT(dev))
+               for_each_pipe(pipe)
+                       error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 
        if (INTEL_INFO(dev)->gen >= 6) {
                error->error = I915_READ(ERROR_GEN6);
@@ -1567,7 +1653,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
        queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
 }
 
-static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -1777,6 +1863,37 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
        return false;
 }
 
+static bool semaphore_passed(struct intel_ring_buffer *ring)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
+       struct intel_ring_buffer *signaller;
+       u32 cmd, ipehr, acthd_min;
+
+       ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+       if ((ipehr & ~(0x3 << 16)) !=
+           (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
+               return false;
+
+       /* ACTHD is likely pointing to the dword after the actual command,
+        * so scan backwards until we find the MBOX.
+        */
+       acthd_min = max((int)acthd - 3 * 4, 0);
+       do {
+               cmd = ioread32(ring->virtual_start + acthd);
+               if (cmd == ipehr)
+                       break;
+
+               acthd -= 4;
+               if (acthd < acthd_min)
+                       return false;
+       } while (1);
+
+       signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
+       return i915_seqno_passed(signaller->get_seqno(signaller, false),
+                                ioread32(ring->virtual_start+acthd+4)+1);
+}
+
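+
+The signaller lookup at the end of semaphore_passed() decodes which ring should have written the mailbox from bit 17 of the saved MI_SEMAPHORE_MBOX header. A worked example for the render ring, assuming the usual RCS=0/VCS=1/BCS=2 numbering:
+
+	/* ring->id == RCS (0):
+	 *   bit 17 == 0  ->  (0 + 0 + 1) % 3 == 1  ->  dev_priv->ring[VCS]
+	 *   bit 17 == 1  ->  (0 + 1 + 1) % 3 == 2  ->  dev_priv->ring[BCS]
+	 */
+	int bit17 = (ipehr >> 17) & 1;
+	struct intel_ring_buffer *signaller =
+		&dev_priv->ring[(ring->id + bit17 + 1) % 3];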
 static bool kick_ring(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
@@ -1788,6 +1905,15 @@ static bool kick_ring(struct intel_ring_buffer *ring)
                I915_WRITE_CTL(ring, tmp);
                return true;
        }
+
+       if (INTEL_INFO(dev)->gen >= 6 &&
+           tmp & RING_WAIT_SEMAPHORE &&
+           semaphore_passed(ring)) {
+               DRM_ERROR("Kicking stuck semaphore on %s\n",
+                         ring->name);
+               I915_WRITE_CTL(ring, tmp);
+               return true;
+       }
        return false;
 }
 
@@ -1903,7 +2029,13 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
 
        /* south display irq */
        I915_WRITE(SDEIMR, 0xffffffff);
-       I915_WRITE(SDEIER, 0x0);
+       /*
+        * SDEIER is also touched by the interrupt handler to work around missed
+        * PCH interrupts. Hence we can't update it after the interrupt handler
+        * is enabled - instead we unconditionally enable all PCH interrupt
+        * sources here, but then only unmask them as needed with SDEIMR.
+        */
+       I915_WRITE(SDEIER, 0xffffffff);
        POSTING_READ(SDEIER);
 }
 
@@ -1939,18 +2071,30 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
        POSTING_READ(VLV_IER);
 }
 
-/*
- * Enable digital hotplug on the PCH, and configure the DP short pulse
- * duration to 2ms (which is the minimum in the Display Port spec)
- *
- * This register is the same on all known PCH chips.
- */
-
-static void ibx_enable_hotplug(struct drm_device *dev)
+static void ibx_hpd_irq_setup(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32     hotplug;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *intel_encoder;
+       u32 mask = ~I915_READ(SDEIMR);
+       u32 hotplug;
+
+       if (HAS_PCH_IBX(dev)) {
+               list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+                       mask |= hpd_ibx[intel_encoder->hpd_pin];
+       } else {
+               list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+                       mask |= hpd_cpt[intel_encoder->hpd_pin];
+       }
+
+       I915_WRITE(SDEIMR, ~mask);
 
+       /*
+        * Enable digital hotplug on the PCH, and configure the DP short pulse
+        * duration to 2ms (which is the minimum in the Display Port spec)
+        *
+        * This register is the same on all known PCH chips.
+        */
        hotplug = I915_READ(PCH_PORT_HOTPLUG);
        hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
        hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
@@ -1965,20 +2109,11 @@ static void ibx_irq_postinstall(struct drm_device *dev)
        u32 mask;
 
        if (HAS_PCH_IBX(dev))
-               mask = SDE_HOTPLUG_MASK |
-                      SDE_GMBUS |
-                      SDE_AUX_MASK;
+               mask = SDE_GMBUS | SDE_AUX_MASK;
        else
-               mask = SDE_HOTPLUG_MASK_CPT |
-                      SDE_GMBUS_CPT |
-                      SDE_AUX_MASK_CPT;
-
+               mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
        I915_WRITE(SDEIMR, ~mask);
-       I915_WRITE(SDEIER, mask);
-       POSTING_READ(SDEIER);
-
-       ibx_enable_hotplug(dev);
 }
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -2089,9 +2224,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
                I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
                I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
-       dev_priv->pipestat[0] = 0;
-       dev_priv->pipestat[1] = 0;
-
        /* Hack for broken MSIs on VLV */
        pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
        pci_read_config_word(dev->pdev, 0x98, &msid);
@@ -2135,30 +2267,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-static void valleyview_hpd_irq_setup(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
-       /* Note HDMI and DP share bits */
-       if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
-               hotplug_en |= PORTB_HOTPLUG_INT_EN;
-       if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
-               hotplug_en |= PORTC_HOTPLUG_INT_EN;
-       if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
-               hotplug_en |= PORTD_HOTPLUG_INT_EN;
-       if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
-               hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-       if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
-               hotplug_en |= SDVOB_HOTPLUG_INT_EN;
-       if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
-               hotplug_en |= CRT_HOTPLUG_INT_EN;
-               hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
-       }
-
-       I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-}
-
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2221,9 +2329,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-       dev_priv->pipestat[0] = 0;
-       dev_priv->pipestat[1] = 0;
-
        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
@@ -2246,6 +2351,37 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i8xx_handle_vblank(struct drm_device *dev,
+                              int pipe, u16 iir)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
+
+       if (!drm_handle_vblank(dev, pipe))
+               return false;
+
+       if ((iir & flip_pending) == 0)
+               return false;
+
+       intel_prepare_page_flip(dev, pipe);
+
+       /* We detect FlipDone by looking for the change in PendingFlip from '1'
+        * to '0' on the following vblank, i.e. IIR has the PendingFlip
+        * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+        * the flip is completed (no longer pending). Since this doesn't raise
+        * an interrupt per se, we watch for the change at vblank.
+        */
+       if (I915_READ16(ISR) & flip_pending)
+               return false;
+
+       intel_finish_page_flip(dev, pipe);
+
+       return true;
+}
+
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
@@ -2301,22 +2437,12 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                        notify_ring(dev, &dev_priv->ring[RCS]);
 
                if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
-                   drm_handle_vblank(dev, 0)) {
-                       if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
-                               intel_prepare_page_flip(dev, 0);
-                               intel_finish_page_flip(dev, 0);
-                               flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
-                       }
-               }
+                   i8xx_handle_vblank(dev, 0, iir))
+                       flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
 
                if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
-                   drm_handle_vblank(dev, 1)) {
-                       if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
-                               intel_prepare_page_flip(dev, 1);
-                               intel_finish_page_flip(dev, 1);
-                               flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
-                       }
-               }
+                   i8xx_handle_vblank(dev, 1, iir))
+                       flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
 
                iir = new_iir;
        }
@@ -2364,9 +2490,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;
 
-       dev_priv->pipestat[0] = 0;
-       dev_priv->pipestat[1] = 0;
-
        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
        /* Unmask the interrupts that we always want on. */
@@ -2404,33 +2527,35 @@ static int i915_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-static void i915_hpd_irq_setup(struct drm_device *dev)
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i915_handle_vblank(struct drm_device *dev,
+                              int plane, int pipe, u32 iir)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 hotplug_en;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
-       if (I915_HAS_HOTPLUG(dev)) {
-               hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+       if (!drm_handle_vblank(dev, pipe))
+               return false;
 
-               if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
-                       hotplug_en |= PORTB_HOTPLUG_INT_EN;
-               if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
-                       hotplug_en |= PORTC_HOTPLUG_INT_EN;
-               if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
-                       hotplug_en |= PORTD_HOTPLUG_INT_EN;
-               if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
-                       hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-               if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
-                       hotplug_en |= SDVOB_HOTPLUG_INT_EN;
-               if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
-                       hotplug_en |= CRT_HOTPLUG_INT_EN;
-                       hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
-               }
+       if ((iir & flip_pending) == 0)
+               return false;
 
-               /* Ignore TV since it's buggy */
+       intel_prepare_page_flip(dev, plane);
 
-               I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-       }
+       /* We detect FlipDone by looking for the change in PendingFlip from '1'
+        * to '0' on the following vblank, i.e. IIR has the PendingFlip
+        * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+        * the flip is completed (no longer pending). Since this doesn't raise
+        * an interrupt per se, we watch for the change at vblank.
+        */
+       if (I915_READ(ISR) & flip_pending)
+               return false;
+
+       intel_finish_page_flip(dev, pipe);
+
+       return true;
 }
 
 static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -2442,10 +2567,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
-       u32 flip[2] = {
-               I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
-               I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
-       };
        int pipe, ret = IRQ_NONE;
 
        atomic_inc(&dev_priv->irq_received);
@@ -2489,7 +2610,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                  hotplug_status);
-                       if (hotplug_status & dev_priv->hotplug_supported_mask)
+                       if (hotplug_status & HOTPLUG_INT_STATUS_I915)
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
 
@@ -2507,14 +2628,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                        int plane = pipe;
                        if (IS_MOBILE(dev))
                                plane = !plane;
+
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
-                           drm_handle_vblank(dev, pipe)) {
-                               if (iir & flip[plane]) {
-                                       intel_prepare_page_flip(dev, plane);
-                                       intel_finish_page_flip(dev, pipe);
-                                       flip_mask &= ~flip[plane];
-                               }
-                       }
+                           i915_handle_vblank(dev, plane, pipe, iir))
+                               flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
@@ -2603,13 +2720,13 @@ static int i965_irq_postinstall(struct drm_device *dev)
                               I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 
        enable_mask = ~dev_priv->irq_mask;
+       enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+                        I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
        enable_mask |= I915_USER_INTERRUPT;
 
        if (IS_G4X(dev))
                enable_mask |= I915_BSD_USER_INTERRUPT;
 
-       dev_priv->pipestat[0] = 0;
-       dev_priv->pipestat[1] = 0;
        i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
 
        /*
@@ -2639,45 +2756,32 @@ static int i965_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-static void i965_hpd_irq_setup(struct drm_device *dev)
+static void i915_hpd_irq_setup(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
        u32 hotplug_en;
 
-       /* Note HDMI and DP share hotplug bits */
-       hotplug_en = 0;
-       if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
-               hotplug_en |= PORTB_HOTPLUG_INT_EN;
-       if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
-               hotplug_en |= PORTC_HOTPLUG_INT_EN;
-       if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
-               hotplug_en |= PORTD_HOTPLUG_INT_EN;
-       if (IS_G4X(dev)) {
-               if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
-                       hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-               if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
-                       hotplug_en |= SDVOB_HOTPLUG_INT_EN;
-       } else {
-               if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
-                       hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-               if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
-                       hotplug_en |= SDVOB_HOTPLUG_INT_EN;
-       }
-       if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
-               hotplug_en |= CRT_HOTPLUG_INT_EN;
-
+       if (I915_HAS_HOTPLUG(dev)) {
+               hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+               hotplug_en &= ~HOTPLUG_INT_EN_MASK;
+               /* Note HDMI and DP share hotplug bits */
+               /* enable bits are the same for all generations */
+               list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+                       hotplug_en |= hpd_mask_i915[encoder->hpd_pin];
                /* Programming the CRT detection parameters tends
                   to generate a spurious hotplug event about three
                   seconds later.  So just do it once.
-                  */
+               */
                if (IS_G4X(dev))
                        hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+               hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
                hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
-       }
-
-       /* Ignore TV since it's buggy */
 
-       I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+               /* Ignore TV since it's buggy */
+               I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+       }
 }
 
 static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -2689,6 +2793,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
        unsigned long irqflags;
        int irq_received;
        int ret = IRQ_NONE, pipe;
+       u32 flip_mask =
+               I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+               I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
        atomic_inc(&dev_priv->irq_received);
 
@@ -2697,7 +2804,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
        for (;;) {
                bool blc_event = false;
 
-               irq_received = iir != 0;
+               irq_received = (iir & ~flip_mask) != 0;
 
                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
@@ -2736,7 +2843,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 
                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                  hotplug_status);
-                       if (hotplug_status & dev_priv->hotplug_supported_mask)
+                       if (hotplug_status & (IS_G4X(dev) ?
+                                             HOTPLUG_INT_STATUS_G4X :
+                                             HOTPLUG_INT_STATUS_I965))
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
 
@@ -2744,7 +2853,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                        I915_READ(PORT_HOTPLUG_STAT);
                }
 
-               I915_WRITE(IIR, iir);
+               I915_WRITE(IIR, iir & ~flip_mask);
                new_iir = I915_READ(IIR); /* Flush posted writes */
 
                if (iir & I915_USER_INTERRUPT)
@@ -2752,18 +2861,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                if (iir & I915_BSD_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[VCS]);
 
-               if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
-                       intel_prepare_page_flip(dev, 0);
-
-               if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
-                       intel_prepare_page_flip(dev, 1);
-
                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
-                           drm_handle_vblank(dev, pipe)) {
-                               i915_pageflip_stall_check(dev, pipe);
-                               intel_finish_page_flip(dev, pipe);
-                       }
+                           i915_handle_vblank(dev, pipe, pipe, iir))
+                               flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
 
                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
@@ -2857,7 +2958,7 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->irq_uninstall = valleyview_irq_uninstall;
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
-               dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
+               dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                /* Share pre & uninstall handlers with ILK/SNB */
                dev->driver->irq_handler = ivybridge_irq_handler;
@@ -2866,6 +2967,7 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ivybridge_enable_vblank;
                dev->driver->disable_vblank = ivybridge_disable_vblank;
+               dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2873,6 +2975,7 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
+               dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else {
                if (INTEL_INFO(dev)->gen == 2) {
                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
@@ -2890,7 +2993,7 @@ void intel_irq_init(struct drm_device *dev)
                        dev->driver->irq_postinstall = i965_irq_postinstall;
                        dev->driver->irq_uninstall = i965_irq_uninstall;
                        dev->driver->irq_handler = i965_irq_handler;
-                       dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
+                       dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
                }
                dev->driver->enable_vblank = i915_enable_vblank;
                dev->driver->disable_vblank = i915_disable_vblank;
index 848992f67d56daa8910ce0e66210f51bddb2f604..058686c0dbbf6e4f4da3f163a08f4a7f8a25dde4 100644 (file)
@@ -91,6 +91,7 @@
 #define  GRDOM_FULL    (0<<2)
 #define  GRDOM_RENDER  (1<<2)
 #define  GRDOM_MEDIA   (3<<2)
+#define  GRDOM_MASK    (3<<2)
 #define  GRDOM_RESET_ENABLE (1<<0)
 
 #define GEN6_MBCUNIT_SNPCR     0x900c /* for LLC config */
 
 #define GAM_ECOCHK                     0x4090
 #define   ECOCHK_SNB_BIT               (1<<10)
+#define   HSW_ECOCHK_ARB_PRIO_SOL      (1<<6)
 #define   ECOCHK_PPGTT_CACHE64B                (0x3<<3)
 #define   ECOCHK_PPGTT_CACHE4B         (0x0<<3)
 
 #define GEN7_ERR_INT   0x44040
 #define   ERR_INT_MMIO_UNCLAIMED (1<<13)
 
+#define FPGA_DBG               0x42300
+#define   FPGA_DBG_RM_NOCLAIM  (1<<31)
+
 #define DERRMR         0x44050
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
 #define   I915_USER_INTERRUPT                          (1<<1)
 #define   I915_ASLE_INTERRUPT                          (1<<0)
 #define   I915_BSD_USER_INTERRUPT                      (1<<25)
+#define   DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
 #define EIR            0x020b0
 #define EMR            0x020b4
 #define ESR            0x020b8
 #define   SDVOC_HOTPLUG_INT_EN                 (1 << 25)
 #define   TV_HOTPLUG_INT_EN                    (1 << 18)
 #define   CRT_HOTPLUG_INT_EN                   (1 << 9)
+#define HOTPLUG_INT_EN_MASK                    (PORTB_HOTPLUG_INT_EN | \
+                                                PORTC_HOTPLUG_INT_EN | \
+                                                PORTD_HOTPLUG_INT_EN | \
+                                                SDVOC_HOTPLUG_INT_EN | \
+                                                SDVOB_HOTPLUG_INT_EN | \
+                                                CRT_HOTPLUG_INT_EN)
 #define   CRT_HOTPLUG_FORCE_DETECT             (1 << 3)
 #define CRT_HOTPLUG_ACTIVATION_PERIOD_32       (0 << 8)
 /* must use period 64 on GM45 according to docs */
 #define   SDVOB_HOTPLUG_INT_STATUS_I965                (3 << 2)
 #define   SDVOC_HOTPLUG_INT_STATUS_I915                (1 << 7)
 #define   SDVOB_HOTPLUG_INT_STATUS_I915                (1 << 6)
-
-/* SDVO port control */
-#define SDVOB                  0x61140
-#define SDVOC                  0x61160
-#define   SDVO_ENABLE          (1 << 31)
-#define   SDVO_PIPE_B_SELECT   (1 << 30)
-#define   SDVO_STALL_SELECT    (1 << 29)
-#define   SDVO_INTERRUPT_ENABLE        (1 << 26)
+#define   HOTPLUG_INT_STATUS_G4X               (CRT_HOTPLUG_INT_STATUS | \
+                                                SDVOB_HOTPLUG_INT_STATUS_G4X | \
+                                                SDVOC_HOTPLUG_INT_STATUS_G4X | \
+                                                PORTB_HOTPLUG_INT_STATUS | \
+                                                PORTC_HOTPLUG_INT_STATUS | \
+                                                PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I965                        (CRT_HOTPLUG_INT_STATUS | \
+                                                SDVOB_HOTPLUG_INT_STATUS_I965 | \
+                                                SDVOC_HOTPLUG_INT_STATUS_I965 | \
+                                                PORTB_HOTPLUG_INT_STATUS | \
+                                                PORTC_HOTPLUG_INT_STATUS | \
+                                                PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915                        (CRT_HOTPLUG_INT_STATUS | \
+                                                SDVOB_HOTPLUG_INT_STATUS_I915 | \
+                                                SDVOC_HOTPLUG_INT_STATUS_I915 | \
+                                                PORTB_HOTPLUG_INT_STATUS | \
+                                                PORTC_HOTPLUG_INT_STATUS | \
+                                                PORTD_HOTPLUG_INT_STATUS)
+
+/* SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI */
+#define GEN3_SDVOB     0x61140
+#define GEN3_SDVOC     0x61160
+#define GEN4_HDMIB     GEN3_SDVOB
+#define GEN4_HDMIC     GEN3_SDVOC
+#define PCH_SDVOB      0xe1140
+#define PCH_HDMIB      PCH_SDVOB
+#define PCH_HDMIC      0xe1150
+#define PCH_HDMID      0xe1160
+
+/* Gen 3 SDVO bits: */
+#define   SDVO_ENABLE                          (1 << 31)
+#define   SDVO_PIPE_SEL(pipe)                  ((pipe) << 30)
+#define   SDVO_PIPE_SEL_MASK                   (1 << 30)
+#define   SDVO_PIPE_B_SELECT                   (1 << 30)
+#define   SDVO_STALL_SELECT                    (1 << 29)
+#define   SDVO_INTERRUPT_ENABLE                        (1 << 26)
 /**
  * 915G/GM SDVO pixel multiplier.
- *
  * Programmed value is multiplier - 1, up to 5x.
- *
  * \sa DPLL_MD_UDI_MULTIPLIER_MASK
  */
-#define   SDVO_PORT_MULTIPLY_MASK      (7 << 23)
+#define   SDVO_PORT_MULTIPLY_MASK              (7 << 23)
 #define   SDVO_PORT_MULTIPLY_SHIFT             23
-#define   SDVO_PHASE_SELECT_MASK       (15 << 19)
-#define   SDVO_PHASE_SELECT_DEFAULT    (6 << 19)
-#define   SDVO_CLOCK_OUTPUT_INVERT     (1 << 18)
-#define   SDVOC_GANG_MODE              (1 << 16)
-#define   SDVO_ENCODING_SDVO           (0x0 << 10)
-#define   SDVO_ENCODING_HDMI           (0x2 << 10)
-/** Requird for HDMI operation */
-#define   SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
-#define   SDVO_COLOR_RANGE_16_235      (1 << 8)
-#define   SDVO_BORDER_ENABLE           (1 << 7)
-#define   SDVO_AUDIO_ENABLE            (1 << 6)
-/** New with 965, default is to be set */
-#define   SDVO_VSYNC_ACTIVE_HIGH       (1 << 4)
-/** New with 965, default is to be set */
-#define   SDVO_HSYNC_ACTIVE_HIGH       (1 << 3)
-#define   SDVOB_PCIE_CONCURRENCY       (1 << 3)
-#define   SDVO_DETECTED                        (1 << 2)
+#define   SDVO_PHASE_SELECT_MASK               (15 << 19)
+#define   SDVO_PHASE_SELECT_DEFAULT            (6 << 19)
+#define   SDVO_CLOCK_OUTPUT_INVERT             (1 << 18)
+#define   SDVOC_GANG_MODE                      (1 << 16) /* Port C only */
+#define   SDVO_BORDER_ENABLE                   (1 << 7) /* SDVO only */
+#define   SDVOB_PCIE_CONCURRENCY               (1 << 3) /* Port B only */
+#define   SDVO_DETECTED                                (1 << 2)
 /* Bits to be preserved when writing */
-#define   SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
-#define   SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
+#define   SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+                              SDVO_INTERRUPT_ENABLE)
+#define   SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
+
+/* Gen 4 SDVO/HDMI bits: */
+#define   SDVO_COLOR_FORMAT_8bpc               (0 << 26)
+#define   SDVO_ENCODING_SDVO                   (0 << 10)
+#define   SDVO_ENCODING_HDMI                   (2 << 10)
+#define   HDMI_MODE_SELECT_HDMI                        (1 << 9) /* HDMI only */
+#define   HDMI_MODE_SELECT_DVI                 (0 << 9) /* HDMI only */
+#define   HDMI_COLOR_RANGE_16_235              (1 << 8) /* HDMI only */
+#define   SDVO_AUDIO_ENABLE                    (1 << 6)
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define   SDVO_VSYNC_ACTIVE_HIGH               (1 << 4)
+#define   SDVO_HSYNC_ACTIVE_HIGH               (1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define   HDMI_COLOR_FORMAT_12bpc              (3 << 26) /* HDMI only */
+#define   SDVOB_HOTPLUG_ENABLE                 (1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define   SDVO_PIPE_SEL_CPT(pipe)              ((pipe) << 29)
+#define   SDVO_PIPE_SEL_MASK_CPT               (3 << 29)
+
 
 /* DVO port control */
 #define DVOA                   0x61120
 #define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
 
 /* Backlight control */
-#define BLC_PWM_CTL2           0x61250 /* 965+ only */
+#define BLC_PWM_CTL2   (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
 #define   BLM_PWM_ENABLE               (1 << 31)
 #define   BLM_COMBINATION_MODE         (1 << 30) /* gen4 only */
 #define   BLM_PIPE_SELECT              (1 << 29)
 #define   BLM_PHASE_IN_COUNT_MASK      (0xff << 8)
 #define   BLM_PHASE_IN_INCR_SHIFT      (0)
 #define   BLM_PHASE_IN_INCR_MASK       (0xff << 0)
-#define BLC_PWM_CTL            0x61254
+#define BLC_PWM_CTL    (dev_priv->info->display_mmio_offset + 0x61254)
 /*
  * This is the most significant 15 bits of the number of backlight cycles in a
  * complete cycle of the modulated backlight control.
 #define   BACKLIGHT_DUTY_CYCLE_MASK_PNV                (0xfffe)
 #define   BLM_POLARITY_PNV                     (1 << 0) /* pnv only */
 
-#define BLC_HIST_CTL           0x61260
+#define BLC_HIST_CTL   (dev_priv->info->display_mmio_offset + 0x61260)
 
 /* New registers for PCH-split platforms. Safe where new bits show up, the
  * register layout matches gen4 BLC_PWM_CTL[12]. */
 #define   DSPFW_HPLL_CURSOR_SHIFT      16
 #define   DSPFW_HPLL_CURSOR_MASK       (0x3f<<16)
 #define   DSPFW_HPLL_SR_MASK           (0x1ff)
+#define DSPFW4                 (dev_priv->info->display_mmio_offset + 0x70070)
+#define DSPFW7                 (dev_priv->info->display_mmio_offset + 0x7007c)
 
 /* drain latency register values*/
 #define DRAIN_LATENCY_PRECISION_32     32
 #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
 #define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 
+#define _SPACNTR               0x72180
+#define   SP_ENABLE                    (1<<31)
+#define   SP_GEAMMA_ENABLE             (1<<30)
+#define   SP_PIXFORMAT_MASK            (0xf<<26)
+#define   SP_FORMAT_YUV422             (0<<26)
+#define   SP_FORMAT_BGR565             (5<<26)
+#define   SP_FORMAT_BGRX8888           (6<<26)
+#define   SP_FORMAT_BGRA8888           (7<<26)
+#define   SP_FORMAT_RGBX1010102                (8<<26)
+#define   SP_FORMAT_RGBA1010102                (9<<26)
+#define   SP_FORMAT_RGBX8888           (0xe<<26)
+#define   SP_FORMAT_RGBA8888           (0xf<<26)
+#define   SP_SOURCE_KEY                        (1<<22)
+#define   SP_YUV_BYTE_ORDER_MASK       (3<<16)
+#define   SP_YUV_ORDER_YUYV            (0<<16)
+#define   SP_YUV_ORDER_UYVY            (1<<16)
+#define   SP_YUV_ORDER_YVYU            (2<<16)
+#define   SP_YUV_ORDER_VYUY            (3<<16)
+#define   SP_TILED                     (1<<10)
+#define _SPALINOFF             0x72184
+#define _SPASTRIDE             0x72188
+#define _SPAPOS                        0x7218c
+#define _SPASIZE               0x72190
+#define _SPAKEYMINVAL          0x72194
+#define _SPAKEYMSK             0x72198
+#define _SPASURF               0x7219c
+#define _SPAKEYMAXVAL          0x721a0
+#define _SPATILEOFF            0x721a4
+#define _SPACONSTALPHA         0x721a8
+#define _SPAGAMC               0x721f4
+
+#define _SPBCNTR               0x72280
+#define _SPBLINOFF             0x72284
+#define _SPBSTRIDE             0x72288
+#define _SPBPOS                        0x7228c
+#define _SPBSIZE               0x72290
+#define _SPBKEYMINVAL          0x72294
+#define _SPBKEYMSK             0x72298
+#define _SPBSURF               0x7229c
+#define _SPBKEYMAXVAL          0x722a0
+#define _SPBTILEOFF            0x722a4
+#define _SPBCONSTALPHA         0x722a8
+#define _SPBGAMC               0x722f4
+
+#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
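+
+The SP*(pipe, plane) macros above fold two sprite planes per pipe into a single _PIPE() index; assuming the usual _PIPE(index, a, b) expansion of (a) + (index) * ((b) - (a)), the 0x100 gap between the A and B blocks gives every (pipe, plane) pair its own register block:
+
+/* Worked example for SPCNTR, assuming _PIPE(i, a, b) == (a) + (i) * ((b) - (a)):
+ *   SPCNTR(0, 0) -> 0x72180 + 0 * 0x100 = 0x72180   (pipe A, sprite A)
+ *   SPCNTR(0, 1) -> 0x72180 + 1 * 0x100 = 0x72280   (pipe A, sprite B)
+ *   SPCNTR(1, 0) -> 0x72180 + 2 * 0x100 = 0x72380   (pipe B, sprite A)
+ *   SPCNTR(1, 1) -> 0x72180 + 3 * 0x100 = 0x72480   (pipe B, sprite B)
+ */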
+
 /* VBIOS regs */
 #define VGACNTRL               0x71400
 # define VGA_DISP_DISABLE                      (1 << 31)
 #define SDE_PORTC_HOTPLUG       (1 << 9)
 #define SDE_PORTB_HOTPLUG       (1 << 8)
 #define SDE_SDVOB_HOTPLUG       (1 << 6)
-#define SDE_HOTPLUG_MASK       (0xf << 8)
+#define SDE_HOTPLUG_MASK        (SDE_CRT_HOTPLUG | \
+                                SDE_SDVOB_HOTPLUG |    \
+                                SDE_PORTB_HOTPLUG |    \
+                                SDE_PORTC_HOTPLUG |    \
+                                SDE_PORTD_HOTPLUG)
 #define SDE_TRANSB_CRC_DONE    (1 << 5)
 #define SDE_TRANSB_CRC_ERR     (1 << 4)
 #define SDE_TRANSB_FIFO_UNDER  (1 << 3)
 #define SDE_PORTC_HOTPLUG_CPT  (1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT  (1 << 21)
 #define SDE_CRT_HOTPLUG_CPT    (1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT  (1 << 18)
 #define SDE_HOTPLUG_MASK_CPT   (SDE_CRT_HOTPLUG_CPT |          \
+                                SDE_SDVOB_HOTPLUG_CPT |        \
                                 SDE_PORTD_HOTPLUG_CPT |        \
                                 SDE_PORTC_HOTPLUG_CPT |        \
                                 SDE_PORTB_HOTPLUG_CPT)
 #define HSW_VIDEO_DIP_VSC_ECC_B                0x61344
 #define HSW_VIDEO_DIP_GCP_B            0x61210
 
-#define HSW_TVIDEO_DIP_CTL(pipe) \
-        _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
-#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
-        _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
-#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
-        _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
-#define HSW_TVIDEO_DIP_GCP(pipe) \
-       _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+#define HSW_TVIDEO_DIP_CTL(trans) \
+        _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
+        _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
+        _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(trans) \
+       _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
+        _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
 
 #define _TRANS_HTOTAL_B          0xe1000
 #define _TRANS_HBLANK_B          0xe1004
 #define FDI_PLL_CTL_1           0xfe000
 #define FDI_PLL_CTL_2           0xfe004
 
-/* or SDVOB */
-#define HDMIB   0xe1140
-#define  PORT_ENABLE    (1 << 31)
-#define  TRANSCODER(pipe)       ((pipe) << 30)
-#define  TRANSCODER_CPT(pipe)   ((pipe) << 29)
-#define  TRANSCODER_MASK        (1 << 30)
-#define  TRANSCODER_MASK_CPT    (3 << 29)
-#define  COLOR_FORMAT_8bpc      (0)
-#define  COLOR_FORMAT_12bpc     (3 << 26)
-#define  SDVOB_HOTPLUG_ENABLE   (1 << 23)
-#define  SDVO_ENCODING          (0)
-#define  TMDS_ENCODING          (2 << 10)
-#define  NULL_PACKET_VSYNC_ENABLE       (1 << 9)
-/* CPT */
-#define  HDMI_MODE_SELECT      (1 << 9)
-#define  DVI_MODE_SELECT       (0)
-#define  SDVOB_BORDER_ENABLE    (1 << 7)
-#define  AUDIO_ENABLE           (1 << 6)
-#define  VSYNC_ACTIVE_HIGH      (1 << 4)
-#define  HSYNC_ACTIVE_HIGH      (1 << 3)
-#define  PORT_DETECTED          (1 << 2)
-
-/* PCH SDVOB multiplex with HDMIB */
-#define PCH_SDVOB      HDMIB
-
-#define HDMIC   0xe1150
-#define HDMID   0xe1160
-
 #define PCH_LVDS       0xe1180
 #define  LVDS_DETECTED (1 << 1)
 
 #define PIPEB_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6130c)
 #define PIPEB_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61310)
 
+#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS)
+#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL)
+#define VLV_PIPE_PP_ON_DELAYS(pipe) \
+               _PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS)
+#define VLV_PIPE_PP_OFF_DELAYS(pipe) \
+               _PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS)
+#define VLV_PIPE_PP_DIVISOR(pipe) \
+               _PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR)
+
 #define PCH_PP_STATUS          0xc7200
 #define PCH_PP_CONTROL         0xc7204
 #define  PANEL_UNLOCK_REGS     (0xabcd << 16)
 #define  FORCEWAKE                             0xA18C
 #define  FORCEWAKE_VLV                         0x1300b0
 #define  FORCEWAKE_ACK_VLV                     0x1300b4
+#define  FORCEWAKE_MEDIA_VLV                   0x1300b8
+#define  FORCEWAKE_ACK_MEDIA_VLV               0x1300bc
 #define  FORCEWAKE_ACK_HSW                     0x130044
 #define  FORCEWAKE_ACK                         0x130090
+#define  VLV_GTLC_WAKE_CTRL                    0x130090
+#define  VLV_GTLC_PW_STATUS                    0x130094
 #define  FORCEWAKE_MT                          0xa188 /* multi-threaded */
 #define   FORCEWAKE_KERNEL                     0x1
 #define   FORCEWAKE_USER                       0x2
 #define GEN6_RPNSWREQ                          0xA008
 #define   GEN6_TURBO_DISABLE                   (1<<31)
 #define   GEN6_FREQUENCY(x)                    ((x)<<25)
+#define   HSW_FREQUENCY(x)                     ((x)<<24)
 #define   GEN6_OFFSET(x)                       ((x)<<19)
 #define   GEN6_AGGRESSIVE_TURBO                        (0<<15)
 #define GEN6_RC_VIDEO_FREQ                     0xA00C
 #define GEN6_PCODE_DATA                                0x138128
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT       8
 
+#define VLV_IOSF_DOORBELL_REQ                  0x182100
+#define   IOSF_DEVFN_SHIFT                     24
+#define   IOSF_OPCODE_SHIFT                    16
+#define   IOSF_PORT_SHIFT                      8
+#define   IOSF_BYTE_ENABLES_SHIFT              4
+#define   IOSF_BAR_SHIFT                       1
+#define   IOSF_SB_BUSY                         (1<<0)
+#define   IOSF_PORT_PUNIT                      0x4
+#define VLV_IOSF_DATA                          0x182104
+#define VLV_IOSF_ADDR                          0x182108
+
+#define PUNIT_OPCODE_REG_READ                  6
+#define PUNIT_OPCODE_REG_WRITE                 7
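+
+The IOSF sideband definitions above describe a doorbell mailbox: the request word packs device, opcode, port, byte enables and a busy flag, with VLV_IOSF_ADDR/DATA carrying the payload. A hedged sketch of a P-unit register read composed only from these fields (locking and a proper timeout are deliberately omitted):
+
+static u32 vlv_punit_read_sketch(struct drm_i915_private *dev_priv, u32 addr)
+{
+	u32 cmd = (PUNIT_OPCODE_REG_READ << IOSF_OPCODE_SHIFT) |
+		  (IOSF_PORT_PUNIT << IOSF_PORT_SHIFT) |
+		  (0xf << IOSF_BYTE_ENABLES_SHIFT) |
+		  IOSF_SB_BUSY;
+
+	I915_WRITE(VLV_IOSF_ADDR, addr);
+	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+
+	while (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY)
+		cpu_relax();		/* sketch: no timeout */
+
+	return I915_READ(VLV_IOSF_DATA);
+}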
+
 #define GEN6_GT_CORE_STATUS            0x138060
 #define   GEN6_CORE_CPD_STATE_MASK     (7<<4)
 #define   GEN6_RCn_MASK                        7
index 2135f21ea45870aa3475f55bde828b61a5954a20..41f0fdecfbdc27c27f3132d39130e14ba19fd999 100644 (file)
@@ -209,7 +209,8 @@ static void i915_save_display(struct drm_device *dev)
                dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
                dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
                dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
-               dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+               if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+                       dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
        } else {
                dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
                dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
@@ -255,6 +256,7 @@ static void i915_save_display(struct drm_device *dev)
 static void i915_restore_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 mask = 0xffffffff;
 
        /* Display arbitration */
        if (INTEL_INFO(dev)->gen <= 4)
@@ -267,10 +269,13 @@ static void i915_restore_display(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
                I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
 
-       if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
-       } else if (IS_MOBILE(dev) && !IS_I830(dev))
-               I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               mask = ~LVDS_PORT_EN;
+
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+               I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
+       else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+               I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
 
        if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
                I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
index 9462081b1e603af887bd2da1e1718010a8f16567..a3a3e22f1a84aecfa7ad570db7d913c093dc6cfd 100644 (file)
@@ -49,7 +49,7 @@ static ssize_t
 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
-       return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
+       return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
 }
 
 static ssize_t
@@ -57,7 +57,7 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
-       return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
+       return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
 
 static ssize_t
@@ -65,7 +65,7 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
-       return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
+       return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
 
 static ssize_t
@@ -73,7 +73,7 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
-       return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
+       return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
 static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
@@ -215,7 +215,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
        ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       return snprintf(buf, PAGE_SIZE, "%d", ret);
+       return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -229,7 +229,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
        ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       return snprintf(buf, PAGE_SIZE, "%d", ret);
+       return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -280,7 +280,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
        ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       return snprintf(buf, PAGE_SIZE, "%d", ret);
+       return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -355,7 +355,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
        } else {
                BUG();
        }
-       return snprintf(buf, PAGE_SIZE, "%d", val);
+       return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
 static const struct attribute *gen6_attrs[] = {
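All of the show() changes in this sysfs file simply append a trailing '\n': sysfs attribute values conventionally end with a newline so that reads from the shell (for example via cat) terminate cleanly.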
index 32a3693905ecb14f697fd82b4f8a2419e2818552..1d8d63aff44443ef2603dc49d73af14e882ce687 100644 (file)
@@ -45,6 +45,9 @@
 
 struct intel_crt {
        struct intel_encoder base;
+       /* DPMS state is stored in the connector, which we need in the
+        * encoder's enable/disable callbacks */
+       struct intel_connector *connector;
        bool force_hotplug_required;
        u32 adpa_reg;
 };
@@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
        return true;
 }
 
-static void intel_disable_crt(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-       struct intel_crt *crt = intel_encoder_to_crt(encoder);
-       u32 temp;
-
-       temp = I915_READ(crt->adpa_reg);
-       temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
-       temp &= ~ADPA_DAC_ENABLE;
-       I915_WRITE(crt->adpa_reg, temp);
-}
-
-static void intel_enable_crt(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-       struct intel_crt *crt = intel_encoder_to_crt(encoder);
-       u32 temp;
-
-       temp = I915_READ(crt->adpa_reg);
-       temp |= ADPA_DAC_ENABLE;
-       I915_WRITE(crt->adpa_reg, temp);
-}
-
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
        I915_WRITE(crt->adpa_reg, temp);
 }
 
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+       intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+       struct intel_crt *crt = intel_encoder_to_crt(encoder);
+
+       intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+}
+
 static void intel_crt_dpms(struct drm_connector *connector, int mode)
 {
        struct drm_device *dev = connector->dev;
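With the reordering above, intel_enable_crt() and intel_disable_crt() now route through intel_crt_set_dpms(): disable forces DPMS off, while enable restores whatever DPMS level is cached on the connector (hence the new crt->connector back-pointer), so a CRT left in a standby or suspend DPMS state is presumably no longer forced fully on by a modeset.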
@@ -206,10 +199,14 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
-                                const struct drm_display_mode *mode,
-                                struct drm_display_mode *adjusted_mode)
+static bool intel_crt_compute_config(struct intel_encoder *encoder,
+                                    struct intel_crtc_config *pipe_config)
 {
+       struct drm_device *dev = encoder->base.dev;
+
+       if (HAS_PCH_SPLIT(dev))
+               pipe_config->has_pch_encoder = true;
+
        return true;
 }
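intel_crt_compute_config() above replaces the CRT encoder's drm mode_fixup hook with the driver's new compute_config callback, which works on the shared CRTC configuration rather than on raw display modes (the mode_fixup removal appears further down in this file's diff). A minimal sketch of what an encoder hook looks like under this API (a hypothetical encoder, not code from this patch):

static bool foo_encoder_compute_config(struct intel_encoder *encoder,
                                       struct intel_crtc_config *pipe_config)
{
        /* Adjust the shared pipe state here (flag a PCH encoder, clamp
         * pipe_bpp, request dithering, ...) and return false to reject
         * the configuration outright. */
        if (HAS_PCH_SPLIT(encoder->base.dev))
                pipe_config->has_pch_encoder = true;

        return true;
}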
 
@@ -683,7 +680,6 @@ static void intel_crt_reset(struct drm_connector *connector)
  */
 
 static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
-       .mode_fixup = intel_crt_mode_fixup,
        .mode_set = intel_crt_mode_set,
 };
 
@@ -746,6 +742,7 @@ void intel_crt_init(struct drm_device *dev)
        }
 
        connector = &intel_connector->base;
+       crt->connector = intel_connector;
        drm_connector_init(dev, &intel_connector->base,
                           &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
@@ -774,8 +771,11 @@ void intel_crt_init(struct drm_device *dev)
        else
                crt->adpa_reg = ADPA;
 
+       crt->base.compute_config = intel_crt_compute_config;
        crt->base.disable = intel_disable_crt;
        crt->base.enable = intel_enable_crt;
+       if (I915_HAS_HOTPLUG(dev))
+               crt->base.hpd_pin = HPD_CRT;
        if (HAS_DDI(dev))
                crt->base.get_hw_state = intel_ddi_get_hw_state;
        else
@@ -797,8 +797,6 @@ void intel_crt_init(struct drm_device *dev)
         */
        crt->force_hotplug_required = 0;
 
-       dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
-
        /*
         * TODO: find a proper way to discover whether we need to set the
         * polarity and link reversal bits or not, instead of relying on the
index 8d0bac3c35d7df012225ea47e922c60b353a0915..22524cb6903b10c29626157b269b116055e1034a 100644 (file)
@@ -898,6 +898,9 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
                        plls->spll_refcount++;
                        reg = SPLL_CTL;
                        intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+               } else {
+                       DRM_ERROR("SPLL already in use\n");
+                       return false;
                }
 
                WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
@@ -928,7 +931,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 
                temp = TRANS_MSA_SYNC_CLK;
-               switch (intel_crtc->bpp) {
+               switch (intel_crtc->config.pipe_bpp) {
                case 18:
                        temp |= TRANS_MSA_6_BPC;
                        break;
@@ -942,15 +945,13 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
                        temp |= TRANS_MSA_12_BPC;
                        break;
                default:
-                       temp |= TRANS_MSA_8_BPC;
-                       WARN(1, "%d bpp unsupported by DDI function\n",
-                            intel_crtc->bpp);
+                       BUG();
                }
                I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
        }
 }
 
-void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
@@ -966,7 +967,7 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
        temp = TRANS_DDI_FUNC_ENABLE;
        temp |= TRANS_DDI_SELECT_PORT(port);
 
-       switch (intel_crtc->bpp) {
+       switch (intel_crtc->config.pipe_bpp) {
        case 18:
                temp |= TRANS_DDI_BPC_6;
                break;
@@ -980,8 +981,7 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
                temp |= TRANS_DDI_BPC_12;
                break;
        default:
-               WARN(1, "%d bpp unsupported by transcoder DDI function\n",
-                    intel_crtc->bpp);
+               BUG();
        }
 
        if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
@@ -1150,14 +1150,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 
        DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
 
-       return true;
+       return false;
 }
 
 static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
                                       enum pipe pipe)
 {
        uint32_t temp, ret;
-       enum port port;
+       enum port port = I915_MAX_PORTS;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        int i;
@@ -1173,10 +1173,16 @@ static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
                                port = i;
        }
 
-       ret = I915_READ(PORT_CLK_SEL(port));
-
-       DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
-                     pipe_name(pipe), port_name(port), ret);
+       if (port == I915_MAX_PORTS) {
+               WARN(1, "Pipe %c enabled on an unknown port\n",
+                    pipe_name(pipe));
+               ret = PORT_CLK_SEL_NONE;
+       } else {
+               ret = I915_READ(PORT_CLK_SEL(port));
+               DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
+                             "0x%08x\n", pipe_name(pipe), port_name(port),
+                             ret);
+       }
 
        return ret;
 }
@@ -1341,15 +1347,15 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
+       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+       tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
        if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
                ironlake_edp_backlight_off(intel_dp);
        }
-
-       tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-       tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
-       I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
 }
 
 int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
@@ -1467,19 +1473,17 @@ static void intel_ddi_destroy(struct drm_encoder *encoder)
        intel_dp_encoder_destroy(encoder);
 }
 
-static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
-                                const struct drm_display_mode *mode,
-                                struct drm_display_mode *adjusted_mode)
+static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+                                    struct intel_crtc_config *pipe_config)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-       int type = intel_encoder->type;
+       int type = encoder->type;
 
-       WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
+       WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
 
        if (type == INTEL_OUTPUT_HDMI)
-               return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
+               return intel_hdmi_compute_config(encoder, pipe_config);
        else
-               return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
+               return intel_dp_compute_config(encoder, pipe_config);
 }
 
 static const struct drm_encoder_funcs intel_ddi_funcs = {
@@ -1487,7 +1491,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
 };
 
 static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
-       .mode_fixup = intel_ddi_mode_fixup,
        .mode_set = intel_ddi_mode_set,
 };
 
@@ -1527,6 +1530,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
                         DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
 
+       intel_encoder->compute_config = intel_ddi_compute_config;
        intel_encoder->enable = intel_enable_ddi;
        intel_encoder->pre_enable = intel_ddi_pre_enable;
        intel_encoder->disable = intel_disable_ddi;
@@ -1537,9 +1541,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
                                        DDI_BUF_PORT_REVERSAL;
        if (hdmi_connector)
-               intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
-       else
-               intel_dig_port->hdmi.sdvox_reg = 0;
+               intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
        intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
        intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
index b20d50192fcc8f3dda5c7efbe098d1a560e95f71..b7005640144c291af0e472b439a26da4b2b51aa3 100644 (file)
@@ -71,8 +71,24 @@ typedef struct intel_limit intel_limit_t;
 struct intel_limit {
        intel_range_t   dot, vco, n, m, m1, m2, p, p1;
        intel_p2_t          p2;
-       bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
-                       int, int, intel_clock_t *, intel_clock_t *);
+       /**
+        * find_pll() - Find the best values for the PLL
+        * @limit: limits for the PLL
+        * @crtc: current CRTC
+        * @target: target frequency in kHz
+        * @refclk: reference clock frequency in kHz
+        * @match_clock: if provided, the @best_clock P divider must
+        *               match the P divider from @match_clock;
+        *               used for LVDS downclocking
+        * @best_clock: best PLL values found
+        *
+        * Returns true on success, false on failure.
+        */
+       bool (*find_pll)(const intel_limit_t *limit,
+                        struct drm_crtc *crtc,
+                        int target, int refclk,
+                        intel_clock_t *match_clock,
+                        intel_clock_t *best_clock);
 };
 
 /* FDI */
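The kernel-doc added to find_pll above matches how the hook is invoked later in this file's mode-set path: one call to pick the primary PLL values, and an optional second call for the LVDS downclock whose resulting P divider must match the first. A sketch of that call pattern (variable names such as adjusted_mode, refclk and lvds_downclock_avail are assumed from the surrounding code, not part of this hunk):

        ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
                             NULL, &clock);
        if (is_lvds && lvds_downclock_avail)
                has_reduced_clock = limit->find_pll(limit, crtc,
                                                    dev_priv->lvds_downclock,
                                                    refclk,
                                                    &clock, /* match P */
                                                    &reduced_clock);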
@@ -471,7 +487,6 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                if (intel_is_dual_link_lvds(dev)) {
-                       /* LVDS dual channel */
                        if (refclk == 100000)
                                limit = &intel_limits_ironlake_dual_lvds_100m;
                        else
@@ -498,10 +513,8 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
 
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                if (intel_is_dual_link_lvds(dev))
-                       /* LVDS with dual channel */
                        limit = &intel_limits_g4x_dual_channel_lvds;
                else
-                       /* LVDS with dual channel */
                        limit = &intel_limits_g4x_single_channel_lvds;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
                   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
@@ -1254,7 +1267,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
        int cur_pipe;
 
        /* Planes are fixed to pipes on ILK+ */
-       if (HAS_PCH_SPLIT(dev_priv->dev)) {
+       if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) {
                reg = DSPCNTR(pipe);
                val = I915_READ(reg);
                WARN((val & DISPLAY_PLANE_ENABLE),
@@ -1275,6 +1288,25 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
        }
 }
 
+static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
+                                   enum pipe pipe)
+{
+       int reg, i;
+       u32 val;
+
+       if (!IS_VALLEYVIEW(dev_priv->dev))
+               return;
+
+       /* Need to check both planes against the pipe */
+       for (i = 0; i < dev_priv->num_plane; i++) {
+               reg = SPCNTR(pipe, i);
+               val = I915_READ(reg);
+               WARN((val & SP_ENABLE),
+                    "sprite %d assertion failure, should be off on pipe %c but is still active\n",
+                    pipe * 2 + i, pipe_name(pipe));
+       }
+}
+
 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
 {
        u32 val;
@@ -1327,14 +1359,14 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
                              enum pipe pipe, u32 val)
 {
-       if ((val & PORT_ENABLE) == 0)
+       if ((val & SDVO_ENABLE) == 0)
                return false;
 
        if (HAS_PCH_CPT(dev_priv->dev)) {
-               if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+               if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
                        return false;
        } else {
-               if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
+               if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
                        return false;
        }
        return true;
@@ -1392,7 +1424,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
             "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
 
-       WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
+       WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
             && (val & SDVO_PIPE_B_SELECT),
             "IBX PCH hdmi port still using transcoder B\n");
 }
@@ -1419,9 +1451,9 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
             "PCH LVDS enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));
 
-       assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
-       assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
-       assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
 }
 
 /**
@@ -1859,6 +1891,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
         * or we might hang the display.
         */
        assert_planes_disabled(dev_priv, pipe);
+       assert_sprites_disabled(dev_priv, pipe);
 
        /* Don't disable pipe A or pipe A PLLs if needed */
        if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
@@ -1937,6 +1970,15 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
        intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
+static bool need_vtd_wa(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+       if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
+               return true;
+#endif
+       return false;
+}
+
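need_vtd_wa() above gates the VT-d scanout workaround on gen6+ hardware with intel_iommu_gfx_mapped set; the following hunk uses it to raise the framebuffer alignment to 256 KiB, relying on the shadow-page-backed padding PTEs described in the comment there.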
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
                           struct drm_i915_gem_object *obj,
@@ -1967,6 +2009,14 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
                BUG();
        }
 
+       /* Note that the w/a also requires 64 PTE of padding following the
+        * bo. We currently fill all unused PTE with the shadow page and so
+        * we should always have valid PTE following the scanout, preventing
+        * the VT-d warning.
+        */
+       if (need_vtd_wa(dev) && alignment < 256 * 1024)
+               alignment = 256 * 1024;
+
        dev_priv->mm.interruptible = false;
        ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
        if (ret)
@@ -2083,8 +2133,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                dspcntr |= DISPPLANE_RGBX101010;
                break;
        default:
-               DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
-               return -EINVAL;
+               BUG();
        }
 
        if (INTEL_INFO(dev)->gen >= 4) {
@@ -2177,8 +2226,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
                dspcntr |= DISPPLANE_RGBX101010;
                break;
        default:
-               DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
-               return -EINVAL;
+               BUG();
        }
 
        if (obj->tiling_mode != I915_TILING_NONE)
@@ -2229,6 +2277,44 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        return dev_priv->display.update_plane(crtc, fb, x, y);
 }
 
+void intel_display_handle_reset(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+
+       /*
+        * Flips in the rings have been nuked by the reset,
+        * so complete all pending flips so that user space
+        * will get its events and not get stuck.
+        *
+        * Also update the base address of all primary
+        * planes to the last fb to make sure we're
+        * showing the correct fb after a reset.
+        *
+        * Need to make two loops over the crtcs so that we
+        * don't try to grab a crtc mutex before the
+        * pending_flip_queue really got woken up.
+        */
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+               enum plane plane = intel_crtc->plane;
+
+               intel_prepare_page_flip(dev, plane);
+               intel_finish_page_flip_plane(dev, plane);
+       }
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+               mutex_lock(&crtc->mutex);
+               if (intel_crtc->active)
+                       dev_priv->display.update_plane(crtc, crtc->fb,
+                                                      crtc->x, crtc->y);
+               mutex_unlock(&crtc->mutex);
+       }
+}
+
 static int
 intel_finish_fb(struct drm_framebuffer *old_fb)
 {
@@ -2295,10 +2381,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return 0;
        }
 
-       if(intel_crtc->plane > dev_priv->num_pipe) {
+       if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
                                intel_crtc->plane,
-                               dev_priv->num_pipe);
+                               INTEL_INFO(dev)->num_pipes);
                return -EINVAL;
        }
 
@@ -2312,9 +2398,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return ret;
        }
 
-       if (crtc->fb)
-               intel_finish_fb(crtc->fb);
-
        ret = dev_priv->display.update_plane(crtc, fb, x, y);
        if (ret) {
                intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
@@ -2912,32 +2995,6 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
        mutex_unlock(&dev->struct_mutex);
 }
 
-static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct intel_encoder *intel_encoder;
-
-       /*
-        * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
-        * must be driven by its own crtc; no sharing is possible.
-        */
-       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-               switch (intel_encoder->type) {
-               case INTEL_OUTPUT_EDP:
-                       if (!intel_encoder_is_pch_edp(&intel_encoder->base))
-                               return false;
-                       continue;
-               }
-       }
-
-       return true;
-}
-
-static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
-{
-       return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
-}
-
 /* Program iCLKIP clock to the desired frequency */
 static void lpt_program_iclkip(struct drm_crtc *crtc)
 {
@@ -3273,7 +3330,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        u32 temp;
-       bool is_pch_port;
 
        WARN_ON(!crtc->enabled);
 
@@ -3289,9 +3345,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                        I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
        }
 
-       is_pch_port = ironlake_crtc_driving_pch(crtc);
 
-       if (is_pch_port) {
+       if (intel_crtc->config.has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
@@ -3328,10 +3383,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
         */
        intel_crtc_load_lut(crtc);
 
-       intel_enable_pipe(dev_priv, pipe, is_pch_port);
+       intel_enable_pipe(dev_priv, pipe,
+                         intel_crtc->config.has_pch_encoder);
        intel_enable_plane(dev_priv, plane, pipe);
 
-       if (is_pch_port)
+       if (intel_crtc->config.has_pch_encoder)
                ironlake_pch_enable(crtc);
 
        mutex_lock(&dev->struct_mutex);
@@ -3365,7 +3421,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
-       bool is_pch_port;
 
        WARN_ON(!crtc->enabled);
 
@@ -3375,9 +3430,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        intel_crtc->active = true;
        intel_update_watermarks(dev);
 
-       is_pch_port = haswell_crtc_driving_pch(crtc);
-
-       if (is_pch_port)
+       if (intel_crtc->config.has_pch_encoder)
                dev_priv->display.fdi_link_train(crtc);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -3406,12 +3459,13 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        intel_crtc_load_lut(crtc);
 
        intel_ddi_set_pipe_settings(crtc);
-       intel_ddi_enable_pipe_func(crtc);
+       intel_ddi_enable_transcoder_func(crtc);
 
-       intel_enable_pipe(dev_priv, pipe, is_pch_port);
+       intel_enable_pipe(dev_priv, pipe,
+                         intel_crtc->config.has_pch_encoder);
        intel_enable_plane(dev_priv, plane, pipe);
 
-       if (is_pch_port)
+       if (intel_crtc->config.has_pch_encoder)
                lpt_pch_enable(crtc);
 
        mutex_lock(&dev->struct_mutex);
@@ -3523,13 +3577,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
-       bool is_pch_port;
 
        if (!intel_crtc->active)
                return;
 
-       is_pch_port = haswell_crtc_driving_pch(crtc);
-
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->disable(encoder);
 
@@ -3556,7 +3607,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
                if (encoder->post_disable)
                        encoder->post_disable(encoder);
 
-       if (is_pch_port) {
+       if (intel_crtc->config.has_pch_encoder) {
                lpt_disable_pch_transcoder(dev_priv);
                intel_ddi_fdi_disable(crtc);
        }
@@ -3906,22 +3957,23 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
        return encoder->get_hw_state(encoder, &pipe);
 }
 
-static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
-                                 const struct drm_display_mode *mode,
-                                 struct drm_display_mode *adjusted_mode)
+static bool intel_crtc_compute_config(struct drm_crtc *crtc,
+                                     struct intel_crtc_config *pipe_config)
 {
        struct drm_device *dev = crtc->dev;
+       struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
        if (HAS_PCH_SPLIT(dev)) {
                /* FDI link clock is fixed at 2.7G */
-               if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
+               if (pipe_config->requested_mode.clock * 3
+                   > IRONLAKE_FDI_FREQ * 4)
                        return false;
        }
 
        /* All interlaced capable intel hw wants timings in frames. Note though
         * that intel_lvds_mode_fixup does some funny tricks with the crtc
         * timings, so we need to be careful not to clobber these. */
-       if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
+       if (!pipe_config->timings_set)
                drm_mode_set_crtcinfo(adjusted_mode, 0);
 
        /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
@@ -3931,6 +3983,14 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
                adjusted_mode->hsync_start == adjusted_mode->hdisplay)
                return false;
 
+       if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10) {
+               pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
+       } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8) {
+               /* only an 8bpc pipe, with 6bpc dither through the panel fitter
+                * for LVDS. */
+               pipe_config->pipe_bpp = 8*3;
+       }
+
        return true;
 }
 
@@ -4034,142 +4094,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
                && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
-/**
- * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
- * @crtc: CRTC structure
- * @mode: requested mode
- *
- * A pipe may be connected to one or more outputs.  Based on the depth of the
- * attached framebuffer, choose a good color depth to use on the pipe.
- *
- * If possible, match the pipe depth to the fb depth.  In some cases, this
- * isn't ideal, because the connected output supports a lesser or restricted
- * set of depths.  Resolve that here:
- *    LVDS typically supports only 6bpc, so clamp down in that case
- *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
- *    Displays may support a restricted set as well, check EDID and clamp as
- *      appropriate.
- *    DP may want to dither down to 6bpc to fit larger modes
- *
- * RETURNS:
- * Dithering requirement (i.e. false if display bpc and pipe bpc match,
- * true if they don't match).
- */
-static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
-                                        struct drm_framebuffer *fb,
-                                        unsigned int *pipe_bpp,
-                                        struct drm_display_mode *mode)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_connector *connector;
-       struct intel_encoder *intel_encoder;
-       unsigned int display_bpc = UINT_MAX, bpc;
-
-       /* Walk the encoders & connectors on this crtc, get min bpc */
-       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-
-               if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
-                       unsigned int lvds_bpc;
-
-                       if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
-                           LVDS_A3_POWER_UP)
-                               lvds_bpc = 8;
-                       else
-                               lvds_bpc = 6;
-
-                       if (lvds_bpc < display_bpc) {
-                               DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
-                               display_bpc = lvds_bpc;
-                       }
-                       continue;
-               }
-
-               /* Not one of the known troublemakers, check the EDID */
-               list_for_each_entry(connector, &dev->mode_config.connector_list,
-                                   head) {
-                       if (connector->encoder != &intel_encoder->base)
-                               continue;
-
-                       /* Don't use an invalid EDID bpc value */
-                       if (connector->display_info.bpc &&
-                           connector->display_info.bpc < display_bpc) {
-                               DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
-                               display_bpc = connector->display_info.bpc;
-                       }
-               }
-
-               if (intel_encoder->type == INTEL_OUTPUT_EDP) {
-                       /* Use VBT settings if we have an eDP panel */
-                       unsigned int edp_bpc = dev_priv->edp.bpp / 3;
-
-                       if (edp_bpc && edp_bpc < display_bpc) {
-                               DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
-                               display_bpc = edp_bpc;
-                       }
-                       continue;
-               }
-
-               /*
-                * HDMI is either 12 or 8, so if the display lets 10bpc sneak
-                * through, clamp it down.  (Note: >12bpc will be caught below.)
-                */
-               if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
-                       if (display_bpc > 8 && display_bpc < 12) {
-                               DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
-                               display_bpc = 12;
-                       } else {
-                               DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
-                               display_bpc = 8;
-                       }
-               }
-       }
-
-       if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
-               DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
-               display_bpc = 6;
-       }
-
-       /*
-        * We could just drive the pipe at the highest bpc all the time and
-        * enable dithering as needed, but that costs bandwidth.  So choose
-        * the minimum value that expresses the full color range of the fb but
-        * also stays within the max display bpc discovered above.
-        */
-
-       switch (fb->depth) {
-       case 8:
-               bpc = 8; /* since we go through a colormap */
-               break;
-       case 15:
-       case 16:
-               bpc = 6; /* min is 18bpp */
-               break;
-       case 24:
-               bpc = 8;
-               break;
-       case 30:
-               bpc = 10;
-               break;
-       case 48:
-               bpc = 12;
-               break;
-       default:
-               DRM_DEBUG("unsupported depth, assuming 24 bits\n");
-               bpc = min((unsigned int)8, display_bpc);
-               break;
-       }
-
-       display_bpc = min(display_bpc, bpc);
-
-       DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
-                     bpc, display_bpc);
-
-       *pipe_bpp = display_bpc * 3;
-
-       return display_bpc != bpc;
-}
-
 static int vlv_get_refclk(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -4214,37 +4138,38 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
        return refclk;
 }
 
-static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
-                                     intel_clock_t *clock)
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc *crtc)
 {
+       unsigned dotclock = crtc->config.adjusted_mode.clock;
+       struct dpll *clock = &crtc->config.dpll;
+
        /* SDVO TV has fixed PLL values that depend on its clock range;
           this mirrors the VBIOS setting. */
-       if (adjusted_mode->clock >= 100000
-           && adjusted_mode->clock < 140500) {
+       if (dotclock >= 100000 && dotclock < 140500) {
                clock->p1 = 2;
                clock->p2 = 10;
                clock->n = 3;
                clock->m1 = 16;
                clock->m2 = 8;
-       } else if (adjusted_mode->clock >= 140500
-                  && adjusted_mode->clock <= 200000) {
+       } else if (dotclock >= 140500 && dotclock <= 200000) {
                clock->p1 = 1;
                clock->p2 = 10;
                clock->n = 6;
                clock->m1 = 12;
                clock->m2 = 8;
        }
+
+       crtc->config.clock_set = true;
 }
 
-static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
-                                    intel_clock_t *clock,
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
                                     intel_clock_t *reduced_clock)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        u32 fp, fp2 = 0;
+       struct dpll *clock = &crtc->config.dpll;
 
        if (IS_PINEVIEW(dev)) {
                fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
@@ -4260,26 +4185,29 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
 
        I915_WRITE(FP0(pipe), fp);
 
-       intel_crtc->lowfreq_avail = false;
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+       crtc->lowfreq_avail = false;
+       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
            reduced_clock && i915_powersave) {
                I915_WRITE(FP1(pipe), fp2);
-               intel_crtc->lowfreq_avail = true;
+               crtc->lowfreq_avail = true;
        } else {
                I915_WRITE(FP1(pipe), fp);
        }
 }
 
-static void vlv_update_pll(struct drm_crtc *crtc,
-                          struct drm_display_mode *mode,
-                          struct drm_display_mode *adjusted_mode,
-                          intel_clock_t *clock, intel_clock_t *reduced_clock,
-                          int num_connectors)
+static void intel_dp_set_m_n(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
+       if (crtc->config.has_pch_encoder)
+               intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+       else
+               intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+}
+
+static void vlv_update_pll(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        u32 dpll, mdiv, pdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
        bool is_sdvo;
@@ -4287,8 +4215,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 
        mutex_lock(&dev_priv->dpio_lock);
 
-       is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
-               intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+       is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
+               intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
 
        dpll = DPLL_VGA_MODE_DIS;
        dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
@@ -4298,11 +4226,11 @@ static void vlv_update_pll(struct drm_crtc *crtc,
        I915_WRITE(DPLL(pipe), dpll);
        POSTING_READ(DPLL(pipe));
 
-       bestn = clock->n;
-       bestm1 = clock->m1;
-       bestm2 = clock->m2;
-       bestp1 = clock->p1;
-       bestp2 = clock->p2;
+       bestn = crtc->config.dpll.n;
+       bestm1 = crtc->config.dpll.m1;
+       bestm2 = crtc->config.dpll.m2;
+       bestp1 = crtc->config.dpll.p1;
+       bestp2 = crtc->config.dpll.p2;
 
        /*
         * In Valleyview PLL and program lane counter registers are exposed
@@ -4334,8 +4262,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 
        intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
-               intel_dp_set_m_n(crtc, mode, adjusted_mode);
+       if (crtc->config.has_dp_encoder)
+               intel_dp_set_m_n(crtc);
 
        I915_WRITE(DPLL(pipe), dpll);
 
@@ -4345,26 +4273,25 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 
        temp = 0;
        if (is_sdvo) {
-               temp = intel_mode_get_pixel_multiplier(adjusted_mode);
-               if (temp > 1)
-                       temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-               else
-                       temp = 0;
+               temp = 0;
+               if (crtc->config.pixel_multiplier > 1) {
+                       temp = (crtc->config.pixel_multiplier - 1)
+                               << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+               }
        }
        I915_WRITE(DPLL_MD(pipe), temp);
        POSTING_READ(DPLL_MD(pipe));
 
        /* Now program lane control registers */
-       if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
-                       || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
-       {
+       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
+           intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
                temp = 0x1000C4;
                if(pipe == 1)
                        temp |= (1 << 21);
                intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
        }
-       if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
-       {
+
+       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) {
                temp = 0x1000C4;
                if(pipe == 1)
                        temp |= (1 << 21);
@@ -4374,40 +4301,39 @@ static void vlv_update_pll(struct drm_crtc *crtc,
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
-static void i9xx_update_pll(struct drm_crtc *crtc,
-                           struct drm_display_mode *mode,
-                           struct drm_display_mode *adjusted_mode,
-                           intel_clock_t *clock, intel_clock_t *reduced_clock,
+static void i9xx_update_pll(struct intel_crtc *crtc,
+                           intel_clock_t *reduced_clock,
                            int num_connectors)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        u32 dpll;
        bool is_sdvo;
+       struct dpll *clock = &crtc->config.dpll;
 
-       i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+       i9xx_update_pll_dividers(crtc, reduced_clock);
 
-       is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
-               intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+       is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
+               intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
 
        dpll = DPLL_VGA_MODE_DIS;
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
+
        if (is_sdvo) {
-               int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
-               if (pixel_multiplier > 1) {
-                       if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-                               dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+               if ((crtc->config.pixel_multiplier > 1) &&
+                   (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) {
+                       dpll |= (crtc->config.pixel_multiplier - 1)
+                               << SDVO_MULTIPLIER_SHIFT_HIRES;
                }
                dpll |= DPLL_DVO_HIGH_SPEED;
        }
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
                dpll |= DPLL_DVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
@@ -4435,13 +4361,13 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
        if (INTEL_INFO(dev)->gen >= 4)
                dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
-       if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+       if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
                dpll |= PLL_REF_INPUT_TVCLKINBC;
-       else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+       else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
                /* XXX: just matching BIOS for now */
                /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
                dpll |= 3;
-       else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+       else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
@@ -4452,12 +4378,12 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
        POSTING_READ(DPLL(pipe));
        udelay(150);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
+       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
                if (encoder->pre_pll_enable)
                        encoder->pre_pll_enable(encoder);
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
-               intel_dp_set_m_n(crtc, mode, adjusted_mode);
+       if (crtc->config.has_dp_encoder)
+               intel_dp_set_m_n(crtc);
 
        I915_WRITE(DPLL(pipe), dpll);
 
@@ -4468,11 +4394,11 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
        if (INTEL_INFO(dev)->gen >= 4) {
                u32 temp = 0;
                if (is_sdvo) {
-                       temp = intel_mode_get_pixel_multiplier(adjusted_mode);
-                       if (temp > 1)
-                               temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-                       else
-                               temp = 0;
+                       temp = 0;
+                       if (crtc->config.pixel_multiplier > 1) {
+                               temp = (crtc->config.pixel_multiplier - 1)
+                                       << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+                       }
                }
                I915_WRITE(DPLL_MD(pipe), temp);
        } else {
@@ -4485,23 +4411,23 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
        }
 }
 
-static void i8xx_update_pll(struct drm_crtc *crtc,
+static void i8xx_update_pll(struct intel_crtc *crtc,
                            struct drm_display_mode *adjusted_mode,
-                           intel_clock_t *clock, intel_clock_t *reduced_clock,
+                           intel_clock_t *reduced_clock,
                            int num_connectors)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        u32 dpll;
+       struct dpll *clock = &crtc->config.dpll;
 
-       i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+       i9xx_update_pll_dividers(crtc, reduced_clock);
 
        dpll = DPLL_VGA_MODE_DIS;
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        } else {
                if (clock->p1 == 2)
@@ -4512,11 +4438,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
-               /* XXX: just matching BIOS for now */
-               /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
-               dpll |= 3;
-       else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
@@ -4527,7 +4449,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
        POSTING_READ(DPLL(pipe));
        udelay(150);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
+       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
                if (encoder->pre_pll_enable)
                        encoder->pre_pll_enable(encoder);
 
@@ -4603,22 +4525,92 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
                   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 }
 
+static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t pipeconf;
+
+       pipeconf = I915_READ(PIPECONF(intel_crtc->pipe));
+
+       if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
+               /* Enable pixel doubling when the dot clock is > 90% of the (display)
+                * core speed.
+                *
+                * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
+                * pipe == 0 check?
+                */
+               if (intel_crtc->config.requested_mode.clock >
+                   dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
+                       pipeconf |= PIPECONF_DOUBLE_WIDE;
+               else
+                       pipeconf &= ~PIPECONF_DOUBLE_WIDE;
+       }
+
+       /* default to 8bpc */
+       pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
+       if (intel_crtc->config.has_dp_encoder) {
+               if (intel_crtc->config.dither) {
+                       pipeconf |= PIPECONF_6BPC |
+                                   PIPECONF_DITHER_EN |
+                                   PIPECONF_DITHER_TYPE_SP;
+               }
+       }
+
+       if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(&intel_crtc->base,
+                                                     INTEL_OUTPUT_EDP)) {
+               if (intel_crtc->config.dither) {
+                       pipeconf |= PIPECONF_6BPC |
+                                       PIPECONF_ENABLE |
+                                       I965_PIPECONF_ACTIVE;
+               }
+       }
+
+       if (HAS_PIPE_CXSR(dev)) {
+               if (intel_crtc->lowfreq_avail) {
+                       DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+                       pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+               } else {
+                       DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+                       pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+               }
+       }
+
+       pipeconf &= ~PIPECONF_INTERLACE_MASK;
+       if (!IS_GEN2(dev) &&
+           intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+               pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+       else
+               pipeconf |= PIPECONF_PROGRESSIVE;
+
+       if (IS_VALLEYVIEW(dev)) {
+               if (intel_crtc->config.limited_color_range)
+                       pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+               else
+                       pipeconf &= ~PIPECONF_COLOR_RANGE_SELECT;
+       }
+
+       I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
+       POSTING_READ(PIPECONF(intel_crtc->pipe));
+}
+
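i9xx_set_pipeconf() above consolidates the PIPECONF programming that was previously done inline in i9xx_crtc_mode_set() (the corresponding removals appear later in this hunk), and it is now driven by the cached intel_crtc->config state (requested clock, dither, interlace, limited color range) rather than by mode private flags.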
 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
-                             struct drm_display_mode *mode,
-                             struct drm_display_mode *adjusted_mode,
                              int x, int y,
                              struct drm_framebuffer *fb)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_display_mode *adjusted_mode =
+               &intel_crtc->config.adjusted_mode;
+       struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
-       u32 dspcntr, pipeconf;
+       u32 dspcntr;
        bool ok, has_reduced_clock = false, is_sdvo = false;
-       bool is_lvds = false, is_tv = false, is_dp = false;
+       bool is_lvds = false, is_tv = false;
        struct intel_encoder *encoder;
        const intel_limit_t *limit;
        int ret;
@@ -4637,9 +4629,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                case INTEL_OUTPUT_TVOUT:
                        is_tv = true;
                        break;
-               case INTEL_OUTPUT_DISPLAYPORT:
-                       is_dp = true;
-                       break;
                }
 
                num_connectors++;
@@ -4676,86 +4665,42 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                                                    &clock,
                                                    &reduced_clock);
        }
+       /* Compat-code for transition, will disappear. */
+       if (!intel_crtc->config.clock_set) {
+               intel_crtc->config.dpll.n = clock.n;
+               intel_crtc->config.dpll.m1 = clock.m1;
+               intel_crtc->config.dpll.m2 = clock.m2;
+               intel_crtc->config.dpll.p1 = clock.p1;
+               intel_crtc->config.dpll.p2 = clock.p2;
+       }
 
        if (is_sdvo && is_tv)
-               i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+               i9xx_adjust_sdvo_tv_clock(intel_crtc);
 
        if (IS_GEN2(dev))
-               i8xx_update_pll(crtc, adjusted_mode, &clock,
+               i8xx_update_pll(intel_crtc, adjusted_mode,
                                has_reduced_clock ? &reduced_clock : NULL,
                                num_connectors);
        else if (IS_VALLEYVIEW(dev))
-               vlv_update_pll(crtc, mode, adjusted_mode, &clock,
-                               has_reduced_clock ? &reduced_clock : NULL,
-                               num_connectors);
+               vlv_update_pll(intel_crtc);
        else
-               i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
+               i9xx_update_pll(intel_crtc,
                                has_reduced_clock ? &reduced_clock : NULL,
                                num_connectors);
 
-       /* setup pipeconf */
-       pipeconf = I915_READ(PIPECONF(pipe));
-
        /* Set up the display plane register */
        dspcntr = DISPPLANE_GAMMA_ENABLE;
 
-       if (pipe == 0)
-               dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
-       else
-               dspcntr |= DISPPLANE_SEL_PIPE_B;
-
-       if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
-               /* Enable pixel doubling when the dot clock is > 90% of the (display)
-                * core speed.
-                *
-                * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
-                * pipe == 0 check?
-                */
-               if (mode->clock >
-                   dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
-                       pipeconf |= PIPECONF_DOUBLE_WIDE;
+       if (!IS_VALLEYVIEW(dev)) {
+               if (pipe == 0)
+                       dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
                else
-                       pipeconf &= ~PIPECONF_DOUBLE_WIDE;
-       }
-
-       /* default to 8bpc */
-       pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
-       if (is_dp) {
-               if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
-                       pipeconf |= PIPECONF_6BPC |
-                                   PIPECONF_DITHER_EN |
-                                   PIPECONF_DITHER_TYPE_SP;
-               }
-       }
-
-       if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
-               if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
-                       pipeconf |= PIPECONF_6BPC |
-                                       PIPECONF_ENABLE |
-                                       I965_PIPECONF_ACTIVE;
-               }
+                       dspcntr |= DISPPLANE_SEL_PIPE_B;
        }
 
        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
        drm_mode_debug_printmodeline(mode);
 
-       if (HAS_PIPE_CXSR(dev)) {
-               if (intel_crtc->lowfreq_avail) {
-                       DRM_DEBUG_KMS("enabling CxSR downclocking\n");
-                       pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
-               } else {
-                       DRM_DEBUG_KMS("disabling CxSR downclocking\n");
-                       pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
-               }
-       }
-
-       pipeconf &= ~PIPECONF_INTERLACE_MASK;
-       if (!IS_GEN2(dev) &&
-           adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
-               pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
-       else
-               pipeconf |= PIPECONF_PROGRESSIVE;
-
        intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
        /* pipesrc and dspsize control the size that is scaled from,
@@ -4766,8 +4711,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                   (mode->hdisplay - 1));
        I915_WRITE(DSPPOS(plane), 0);
 
-       I915_WRITE(PIPECONF(pipe), pipeconf);
-       POSTING_READ(PIPECONF(pipe));
+       i9xx_set_pipeconf(intel_crtc);
+
        intel_enable_pipe(dev_priv, pipe, false);
 
        intel_wait_for_vblank(dev, pipe);
@@ -4782,12 +4727,26 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        return ret;
 }
 
+static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
+                                struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t tmp;
+
+       tmp = I915_READ(PIPECONF(crtc->pipe));
+       if (!(tmp & PIPECONF_ENABLE))
+               return false;
+
+       return true;
+}
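
A minimal sketch of how a readout hook of this shape is consumed; the actual wiring is in the intel_init_display() and intel_modeset_check_state() hunks further down, and the snippet below is illustrative only, not part of the patch:

	/* illustrative only: cross-check sw-tracked state against the registers */
	struct intel_crtc_config hw_config;
	bool active;

	memset(&hw_config, 0, sizeof(hw_config));
	active = dev_priv->display.get_pipe_config(crtc, &hw_config);
	WARN(active != crtc->active,
	     "crtc active state doesn't match hw (sw %i, hw %i)\n",
	     crtc->active, active);
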
+
 static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;
-       u32 temp;
+       u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_pch_edp = false;
@@ -4830,70 +4789,109 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
-       temp = I915_READ(PCH_DREF_CONTROL);
+       val = I915_READ(PCH_DREF_CONTROL);
+
+       /* As we must carefully and slowly disable/enable each source in turn,
+        * compute the final state we want first and check if we need to
+        * make any changes at all.
+        */
+       final = val;
+       final &= ~DREF_NONSPREAD_SOURCE_MASK;
+       if (has_ck505)
+               final |= DREF_NONSPREAD_CK505_ENABLE;
+       else
+               final |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+       final &= ~DREF_SSC_SOURCE_MASK;
+       final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+       final &= ~DREF_SSC1_ENABLE;
+
+       if (has_panel) {
+               final |= DREF_SSC_SOURCE_ENABLE;
+
+               if (intel_panel_use_ssc(dev_priv) && can_ssc)
+                       final |= DREF_SSC1_ENABLE;
+
+               if (has_cpu_edp) {
+                       if (intel_panel_use_ssc(dev_priv) && can_ssc)
+                               final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+                       else
+                               final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+               } else
+                       final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+       } else {
+               final |= DREF_SSC_SOURCE_DISABLE;
+               final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+       }
+
+       if (final == val)
+               return;
+
        /* Always enable nonspread source */
-       temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+       val &= ~DREF_NONSPREAD_SOURCE_MASK;
 
        if (has_ck505)
-               temp |= DREF_NONSPREAD_CK505_ENABLE;
+               val |= DREF_NONSPREAD_CK505_ENABLE;
        else
-               temp |= DREF_NONSPREAD_SOURCE_ENABLE;
+               val |= DREF_NONSPREAD_SOURCE_ENABLE;
 
        if (has_panel) {
-               temp &= ~DREF_SSC_SOURCE_MASK;
-               temp |= DREF_SSC_SOURCE_ENABLE;
+               val &= ~DREF_SSC_SOURCE_MASK;
+               val |= DREF_SSC_SOURCE_ENABLE;
 
                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        DRM_DEBUG_KMS("Using SSC on panel\n");
-                       temp |= DREF_SSC1_ENABLE;
+                       val |= DREF_SSC1_ENABLE;
                } else
-                       temp &= ~DREF_SSC1_ENABLE;
+                       val &= ~DREF_SSC1_ENABLE;
 
                /* Get SSC going before enabling the outputs */
-               I915_WRITE(PCH_DREF_CONTROL, temp);
+               I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
 
-               temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+               val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                DRM_DEBUG_KMS("Using SSC on eDP\n");
-                               temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+                               val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        }
                        else
-                               temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+                               val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
-                       temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+                       val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
 
-               I915_WRITE(PCH_DREF_CONTROL, temp);
+               I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
                DRM_DEBUG_KMS("Disabling SSC entirely\n");
 
-               temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+               val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
                /* Turn off CPU output */
-               temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+               val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
 
-               I915_WRITE(PCH_DREF_CONTROL, temp);
+               I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
 
                /* Turn off the SSC source */
-               temp &= ~DREF_SSC_SOURCE_MASK;
-               temp |= DREF_SSC_SOURCE_DISABLE;
+               val &= ~DREF_SSC_SOURCE_MASK;
+               val |= DREF_SSC_SOURCE_DISABLE;
 
                /* Turn off SSC1 */
-               temp &= ~ DREF_SSC1_ENABLE;
+               val &= ~DREF_SSC1_ENABLE;
 
-               I915_WRITE(PCH_DREF_CONTROL, temp);
+               I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        }
+
+       BUG_ON(val != final);
 }
 
 /* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
@@ -5118,7 +5116,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
        val = I915_READ(PIPECONF(pipe));
 
        val &= ~PIPECONF_BPC_MASK;
-       switch (intel_crtc->bpp) {
+       switch (intel_crtc->config.pipe_bpp) {
        case 18:
                val |= PIPECONF_6BPC;
                break;
@@ -5146,7 +5144,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
        else
                val |= PIPECONF_PROGRESSIVE;
 
-       if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+       if (intel_crtc->config.limited_color_range)
                val |= PIPECONF_COLOR_RANGE_SELECT;
        else
                val &= ~PIPECONF_COLOR_RANGE_SELECT;
@@ -5162,8 +5160,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
  * is supported, but eventually this should handle various
  * RGB<->YCbCr scenarios as well.
  */
-static void intel_set_pipe_csc(struct drm_crtc *crtc,
-                              const struct drm_display_mode *adjusted_mode)
+static void intel_set_pipe_csc(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5178,7 +5175,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc,
         * consideration.
         */
 
-       if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+       if (intel_crtc->config.limited_color_range)
                coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
 
        /*
@@ -5202,7 +5199,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc,
        if (INTEL_INFO(dev)->gen > 6) {
                uint16_t postoff = 0;
 
-               if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+               if (intel_crtc->config.limited_color_range)
                        postoff = (16 * (1 << 13) / 255) & 0x1fff;
 
                I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -5213,7 +5210,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc,
        } else {
                uint32_t mode = CSC_MODE_YUV_TO_RGB;
 
-               if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+               if (intel_crtc->config.limited_color_range)
                        mode |= CSC_BLACK_SCREEN_OFFSET;
 
                I915_WRITE(PIPE_CSC_MODE(pipe), mode);
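
For reference, the fixed-point values above work out as follows: with a limited (16-235) color range the scale factor is (235 - 16) / 255 ≈ 0.859, which with 12 fractional bits is 219 * 4096 / 255 = 3517 (0xdbd), reduced to 0xdb8 by the & 0xff8 mask on the coefficient. The gen7+ post-offset of 16/255 with 13 fractional bits is 16 * 8192 / 255 = 514.
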
@@ -5303,7 +5300,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
        }
 
        if (is_sdvo && is_tv)
-               i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+               i9xx_adjust_sdvo_tv_clock(to_intel_crtc(crtc));
 
        return true;
 }
@@ -5344,7 +5341,7 @@ static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
                return false;
        }
 
-       if (dev_priv->num_pipe == 2)
+       if (INTEL_INFO(dev)->num_pipes == 2)
                return true;
 
        switch (intel_crtc->pipe) {
@@ -5401,77 +5398,77 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
        return bps / (link_bw * 8) + 1;
 }
 
-static void ironlake_set_m_n(struct drm_crtc *crtc,
-                            struct drm_display_mode *mode,
-                            struct drm_display_mode *adjusted_mode)
+void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+                                 struct intel_link_m_n *m_n)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
-       struct intel_encoder *intel_encoder, *edp_encoder = NULL;
-       struct intel_link_m_n m_n = {0};
-       int target_clock, pixel_multiplier, lane, link_bw;
-       bool is_dp = false, is_cpu_edp = false;
+       int pipe = crtc->pipe;
 
-       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-               switch (intel_encoder->type) {
-               case INTEL_OUTPUT_DISPLAYPORT:
-                       is_dp = true;
-                       break;
-               case INTEL_OUTPUT_EDP:
-                       is_dp = true;
-                       if (!intel_encoder_is_pch_edp(&intel_encoder->base))
-                               is_cpu_edp = true;
-                       edp_encoder = intel_encoder;
-                       break;
-               }
-       }
+       I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+       I915_WRITE(TRANSDATA_N1(pipe), m_n->gmch_n);
+       I915_WRITE(TRANSDPLINK_M1(pipe), m_n->link_m);
+       I915_WRITE(TRANSDPLINK_N1(pipe), m_n->link_n);
+}
+
+void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+                                 struct intel_link_m_n *m_n)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe = crtc->pipe;
+       enum transcoder transcoder = crtc->cpu_transcoder;
 
-       /* FDI link */
-       pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
-       lane = 0;
-       /* CPU eDP doesn't require FDI link, so just set DP M/N
-          according to current link config */
-       if (is_cpu_edp) {
-               intel_edp_link_config(edp_encoder, &lane, &link_bw);
+       if (INTEL_INFO(dev)->gen >= 5) {
+               I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
+               I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
+               I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
+               I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
        } else {
-               /* FDI is a binary signal running at ~2.7GHz, encoding
-                * each output octet as 10 bits. The actual frequency
-                * is stored as a divider into a 100MHz clock, and the
-                * mode pixel clock is stored in units of 1KHz.
-                * Hence the bw of each lane in terms of the mode signal
-                * is:
-                */
-               link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+               I915_WRITE(PIPE_GMCH_DATA_M(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+               I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n->gmch_n);
+               I915_WRITE(PIPE_DP_LINK_M(pipe), m_n->link_m);
+               I915_WRITE(PIPE_DP_LINK_N(pipe), m_n->link_n);
        }
+}
 
-       /* [e]DP over FDI requires target mode clock instead of link clock. */
-       if (edp_encoder)
-               target_clock = intel_edp_target_clock(edp_encoder, mode);
-       else if (is_dp)
-               target_clock = mode->clock;
+static void ironlake_fdi_set_m_n(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_display_mode *adjusted_mode =
+               &intel_crtc->config.adjusted_mode;
+       struct intel_link_m_n m_n = {0};
+       int target_clock, lane, link_bw;
+
+       /* FDI is a binary signal running at ~2.7GHz, encoding
+        * each output octet as 10 bits. The actual frequency
+        * is stored as a divider into a 100MHz clock, and the
+        * mode pixel clock is stored in units of 1KHz.
+        * Hence the bw of each lane in terms of the mode signal
+        * is:
+        */
+       link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+
+       if (intel_crtc->config.pixel_target_clock)
+               target_clock = intel_crtc->config.pixel_target_clock;
        else
                target_clock = adjusted_mode->clock;
 
-       if (!lane)
-               lane = ironlake_get_lanes_required(target_clock, link_bw,
-                                                  intel_crtc->bpp);
+       lane = ironlake_get_lanes_required(target_clock, link_bw,
+                                          intel_crtc->config.pipe_bpp);
 
        intel_crtc->fdi_lanes = lane;
 
-       if (pixel_multiplier > 1)
-               link_bw *= pixel_multiplier;
-       intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
+       if (intel_crtc->config.pixel_multiplier > 1)
+               link_bw *= intel_crtc->config.pixel_multiplier;
+       intel_link_compute_m_n(intel_crtc->config.pipe_bpp, lane, target_clock,
+                              link_bw, &m_n);
 
-       I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
-       I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
-       I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
-       I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+       intel_cpu_transcoder_set_m_n(intel_crtc, &m_n);
 }
 
 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
-                                     struct drm_display_mode *adjusted_mode,
                                      intel_clock_t *clock, u32 fp)
 {
        struct drm_crtc *crtc = &intel_crtc->base;
@@ -5479,9 +5476,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
        uint32_t dpll;
-       int factor, pixel_multiplier, num_connectors = 0;
+       int factor, num_connectors = 0;
        bool is_lvds = false, is_sdvo = false, is_tv = false;
-       bool is_dp = false, is_cpu_edp = false;
 
        for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
                switch (intel_encoder->type) {
@@ -5497,14 +5493,6 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
                case INTEL_OUTPUT_TVOUT:
                        is_tv = true;
                        break;
-               case INTEL_OUTPUT_DISPLAYPORT:
-                       is_dp = true;
-                       break;
-               case INTEL_OUTPUT_EDP:
-                       is_dp = true;
-                       if (!intel_encoder_is_pch_edp(&intel_encoder->base))
-                               is_cpu_edp = true;
-                       break;
                }
 
                num_connectors++;
@@ -5530,13 +5518,14 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
        if (is_sdvo) {
-               pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
-               if (pixel_multiplier > 1) {
-                       dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+               if (intel_crtc->config.pixel_multiplier > 1) {
+                       dpll |= (intel_crtc->config.pixel_multiplier - 1)
+                               << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
                }
                dpll |= DPLL_DVO_HIGH_SPEED;
        }
-       if (is_dp && !is_cpu_edp)
+       if (intel_crtc->config.has_dp_encoder &&
+           intel_crtc->config.has_pch_encoder)
                dpll |= DPLL_DVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
@@ -5574,21 +5563,22 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 }
 
 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
-                                 struct drm_display_mode *mode,
-                                 struct drm_display_mode *adjusted_mode,
                                  int x, int y,
                                  struct drm_framebuffer *fb)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_display_mode *adjusted_mode =
+               &intel_crtc->config.adjusted_mode;
+       struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        int num_connectors = 0;
        intel_clock_t clock, reduced_clock;
        u32 dpll, fp = 0, fp2 = 0;
        bool ok, has_reduced_clock = false;
-       bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+       bool is_lvds = false;
        struct intel_encoder *encoder;
        int ret;
        bool dither, fdi_config_ok;
@@ -5598,14 +5588,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
                        break;
-               case INTEL_OUTPUT_DISPLAYPORT:
-                       is_dp = true;
-                       break;
-               case INTEL_OUTPUT_EDP:
-                       is_dp = true;
-                       if (!intel_encoder_is_pch_edp(&encoder->base))
-                               is_cpu_edp = true;
-                       break;
                }
 
                num_connectors++;
@@ -5614,19 +5596,28 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
             "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
 
+       intel_crtc->cpu_transcoder = pipe;
+
        ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
                                     &has_reduced_clock, &reduced_clock);
        if (!ok) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        }
+       /* Compat-code for transition, will disappear. */
+       if (!intel_crtc->config.clock_set) {
+               intel_crtc->config.dpll.n = clock.n;
+               intel_crtc->config.dpll.m1 = clock.m1;
+               intel_crtc->config.dpll.m2 = clock.m2;
+               intel_crtc->config.dpll.p1 = clock.p1;
+               intel_crtc->config.dpll.p2 = clock.p2;
+       }
 
        /* Ensure that the cursor is valid for the new mode before changing... */
        intel_crtc_update_cursor(crtc, true);
 
        /* determine panel color depth */
-       dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
-                                             adjusted_mode);
+       dither = intel_crtc->config.dither;
        if (is_lvds && dev_priv->lvds_dither)
                dither = true;
 
@@ -5635,13 +5626,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
                        reduced_clock.m2;
 
-       dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
+       dpll = ironlake_compute_dpll(intel_crtc, &clock, fp);
 
        DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
        drm_mode_debug_printmodeline(mode);
 
        /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
-       if (!is_cpu_edp) {
+       if (intel_crtc->config.has_pch_encoder) {
                struct intel_pch_pll *pll;
 
                pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -5653,8 +5644,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        } else
                intel_put_pch_pll(intel_crtc);
 
-       if (is_dp && !is_cpu_edp)
-               intel_dp_set_m_n(crtc, mode, adjusted_mode);
+       if (intel_crtc->config.has_dp_encoder)
+               intel_dp_set_m_n(intel_crtc);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_pll_enable)
@@ -5689,7 +5680,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
        /* Note, this also computes intel_crtc->fdi_lanes which is used below in
         * ironlake_check_fdi_lanes. */
-       ironlake_set_m_n(crtc, mode, adjusted_mode);
+       intel_crtc->fdi_lanes = 0;
+       if (intel_crtc->config.has_pch_encoder)
+               ironlake_fdi_set_m_n(crtc);
 
        fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
 
@@ -5710,6 +5703,23 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        return fdi_config_ok ? ret : -EINVAL;
 }
 
+static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
+                                    struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t tmp;
+
+       tmp = I915_READ(PIPECONF(crtc->pipe));
+       if (!(tmp & PIPECONF_ENABLE))
+               return false;
+
+       if (I915_READ(TRANSCONF(crtc->pipe)) & TRANS_ENABLE)
+               pipe_config->has_pch_encoder = true;
+
+       return true;
+}
+
 static void haswell_modeset_global_resources(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5740,29 +5750,26 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
 }
 
 static int haswell_crtc_mode_set(struct drm_crtc *crtc,
-                                struct drm_display_mode *mode,
-                                struct drm_display_mode *adjusted_mode,
                                 int x, int y,
                                 struct drm_framebuffer *fb)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_display_mode *adjusted_mode =
+               &intel_crtc->config.adjusted_mode;
+       struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        int num_connectors = 0;
-       bool is_dp = false, is_cpu_edp = false;
+       bool is_cpu_edp = false;
        struct intel_encoder *encoder;
        int ret;
        bool dither;
 
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                switch (encoder->type) {
-               case INTEL_OUTPUT_DISPLAYPORT:
-                       is_dp = true;
-                       break;
                case INTEL_OUTPUT_EDP:
-                       is_dp = true;
                        if (!intel_encoder_is_pch_edp(&encoder->base))
                                is_cpu_edp = true;
                        break;
@@ -5795,25 +5802,24 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
        intel_crtc_update_cursor(crtc, true);
 
        /* determine panel color depth */
-       dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
-                                             adjusted_mode);
+       dither = intel_crtc->config.dither;
 
        DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
        drm_mode_debug_printmodeline(mode);
 
-       if (is_dp && !is_cpu_edp)
-               intel_dp_set_m_n(crtc, mode, adjusted_mode);
+       if (intel_crtc->config.has_dp_encoder)
+               intel_dp_set_m_n(intel_crtc);
 
        intel_crtc->lowfreq_avail = false;
 
        intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
-       if (!is_dp || is_cpu_edp)
-               ironlake_set_m_n(crtc, mode, adjusted_mode);
+       if (intel_crtc->config.has_pch_encoder)
+               ironlake_fdi_set_m_n(crtc);
 
        haswell_set_pipeconf(crtc, adjusted_mode, dither);
 
-       intel_set_pipe_csc(crtc, adjusted_mode);
+       intel_set_pipe_csc(crtc);
 
        /* Set up the display plane register */
        I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
@@ -5828,9 +5834,32 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
        return ret;
 }
 
+static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+                                   struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t tmp;
+
+       tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
+       if (!(tmp & PIPECONF_ENABLE))
+               return false;
+
+       /*
+        * Haswell has only FDI/PCH transcoder A, which is connected to
+        * DDI E. So just check whether this pipe is wired to DDI E and whether
+        * the PCH transcoder is on.
+        */
+       tmp = I915_READ(TRANS_DDI_FUNC_CTL(crtc->pipe));
+       if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
+           I915_READ(TRANSCONF(PIPE_A)) & TRANS_ENABLE)
+               pipe_config->has_pch_encoder = true;
+
+
+       return true;
+}
+
 static int intel_crtc_mode_set(struct drm_crtc *crtc,
-                              struct drm_display_mode *mode,
-                              struct drm_display_mode *adjusted_mode,
                               int x, int y,
                               struct drm_framebuffer *fb)
 {
@@ -5839,13 +5868,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        struct drm_encoder_helper_funcs *encoder_funcs;
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_display_mode *adjusted_mode =
+               &intel_crtc->config.adjusted_mode;
+       struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
        int pipe = intel_crtc->pipe;
        int ret;
 
        drm_vblank_pre_modeset(dev, pipe);
 
-       ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
-                                             x, y, fb);
+       ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
+
        drm_vblank_post_modeset(dev, pipe);
 
        if (ret != 0)
@@ -5856,8 +5888,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        encoder->base.base.id,
                        drm_get_encoder_name(&encoder->base),
                        mode->base.id, mode->name);
-               encoder_funcs = encoder->base.helper_private;
-               encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+               if (encoder->mode_set) {
+                       encoder->mode_set(encoder);
+               } else {
+                       encoder_funcs = encoder->base.helper_private;
+                       encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+               }
        }
 
        return 0;
@@ -6325,13 +6361,24 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
        /* we only need to pin inside GTT if cursor is non-phy */
        mutex_lock(&dev->struct_mutex);
        if (!dev_priv->info->cursor_needs_physical) {
+               unsigned alignment;
+
                if (obj->tiling_mode) {
                        DRM_ERROR("cursor cannot be tiled\n");
                        ret = -EINVAL;
                        goto fail_locked;
                }
 
-               ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
+               /* Note that the w/a also requires 2 PTE of padding following
+                * the bo. We currently fill all unused PTE with the shadow
+                * page and so we should always have valid PTE following the
+                * cursor preventing the VT-d warning.
+                */
+               alignment = 0;
+               if (need_vtd_wa(dev))
+                       alignment = 64*1024;
+
+               ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
                if (ret) {
                        DRM_ERROR("failed to move cursor bo into the GTT\n");
                        goto fail_locked;
@@ -6436,20 +6483,6 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
        intel_crtc_load_lut(crtc);
 }
 
-/**
- * Get a pipe with a simple mode set on it for doing load-based monitor
- * detection.
- *
- * It will be up to the load-detect code to adjust the pipe as appropriate for
- * its requirements.  The pipe will be connected to no other encoders.
- *
- * Currently this code will only succeed if there is a pipe with no encoders
- * configured for it.  In the future, it could choose to temporarily disable
- * some outputs to free up a pipe for its use.
- *
- * \return crtc, or NULL if no pipes are available.
- */
-
 /* VESA 640x480x72Hz mode to set on the pipe */
 static struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -6954,7 +6987,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
-       struct drm_i915_gem_object *obj;
        unsigned long flags;
 
        /* Ignore early vblank irqs */
@@ -6984,8 +7016,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
-       obj = work->old_fb_obj;
-
        wake_up_all(&dev_priv->pending_flip_queue);
 
        queue_work(dev_priv->wq, &work->work);
@@ -7473,19 +7503,93 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
        }
 }
 
-static struct drm_display_mode *
-intel_modeset_adjusted_mode(struct drm_crtc *crtc,
-                           struct drm_display_mode *mode)
+static int
+pipe_config_set_bpp(struct drm_crtc *crtc,
+                   struct drm_framebuffer *fb,
+                   struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_connector *connector;
+       int bpp;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_C8:
+               bpp = 8*3; /* since we go through a colormap */
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_ARGB1555:
+               /* checked in intel_framebuffer_init already */
+               if (WARN_ON(INTEL_INFO(dev)->gen > 3))
+                       return -EINVAL;
+       case DRM_FORMAT_RGB565:
+               bpp = 6*3; /* min is 18bpp */
+               break;
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
+               /* checked in intel_framebuffer_init already */
+               if (WARN_ON(INTEL_INFO(dev)->gen < 4))
+                       return -EINVAL;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               bpp = 8*3;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_ABGR2101010:
+               /* checked in intel_framebuffer_init already */
+               if (WARN_ON(INTEL_INFO(dev)->gen < 4))
+                       return -EINVAL;
+               bpp = 10*3;
+               break;
+       /* TODO: gen4+ supports 16 bpc floating point, too. */
+       default:
+               DRM_DEBUG_KMS("unsupported depth\n");
+               return -EINVAL;
+       }
+
+       pipe_config->pipe_bpp = bpp;
+
+       /* Clamp display bpp to EDID value */
+       list_for_each_entry(connector, &dev->mode_config.connector_list,
+                           head) {
+               if (connector->encoder && connector->encoder->crtc != crtc)
+                       continue;
+
+               /* Don't use an invalid EDID bpc value */
+               if (connector->display_info.bpc &&
+                   connector->display_info.bpc * 3 < bpp) {
+                       DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
+                                     bpp, connector->display_info.bpc*3);
+                       pipe_config->pipe_bpp = connector->display_info.bpc*3;
+               }
+       }
+
+       return bpp;
+}
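
As an example of how this plays out: an XRGB8888 framebuffer gives plane_bpp = 24; if a connector on the crtc reports 6 bpc in its EDID, the loop above clamps pipe_config->pipe_bpp to 18, and intel_modeset_pipe_config() below then enables dithering because pipe_bpp no longer matches the plane bpp.
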
+
+static struct intel_crtc_config *
+intel_modeset_pipe_config(struct drm_crtc *crtc,
+                         struct drm_framebuffer *fb,
+                         struct drm_display_mode *mode)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_display_mode *adjusted_mode;
        struct drm_encoder_helper_funcs *encoder_funcs;
        struct intel_encoder *encoder;
+       struct intel_crtc_config *pipe_config;
+       int plane_bpp;
 
-       adjusted_mode = drm_mode_duplicate(dev, mode);
-       if (!adjusted_mode)
+       pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+       if (!pipe_config)
                return ERR_PTR(-ENOMEM);
 
+       drm_mode_copy(&pipe_config->adjusted_mode, mode);
+       drm_mode_copy(&pipe_config->requested_mode, mode);
+
+       plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config);
+       if (plane_bpp < 0)
+               goto fail;
+
        /* Pass our mode to the connectors and the CRTC to give them a chance to
         * adjust it according to limitations or connector properties, and also
         * a chance to reject the mode entirely.
@@ -7495,23 +7599,38 @@ intel_modeset_adjusted_mode(struct drm_crtc *crtc,
 
                if (&encoder->new_crtc->base != crtc)
                        continue;
+
+               if (encoder->compute_config) {
+                       if (!(encoder->compute_config(encoder, pipe_config))) {
+                               DRM_DEBUG_KMS("Encoder config failure\n");
+                               goto fail;
+                       }
+
+                       continue;
+               }
+
                encoder_funcs = encoder->base.helper_private;
-               if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
-                                               adjusted_mode))) {
+               if (!(encoder_funcs->mode_fixup(&encoder->base,
+                                               &pipe_config->requested_mode,
+                                               &pipe_config->adjusted_mode))) {
                        DRM_DEBUG_KMS("Encoder fixup failed\n");
                        goto fail;
                }
        }
 
-       if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+       if (!(intel_crtc_compute_config(crtc, pipe_config))) {
                DRM_DEBUG_KMS("CRTC fixup failed\n");
                goto fail;
        }
        DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
-       return adjusted_mode;
+       pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
+       DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
+                     plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+
+       return pipe_config;
 fail:
-       drm_mode_destroy(dev, adjusted_mode);
+       kfree(pipe_config);
        return ERR_PTR(-EINVAL);
 }
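
The calling convention this introduces mirrors the intel_set_mode() changes further down; a minimal sketch (not additional patch code):

	pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
	if (IS_ERR(pipe_config))
		return PTR_ERR(pipe_config);	/* nothing left to free on error */

	to_intel_crtc(crtc)->config = *pipe_config;	/* struct copy into the crtc */
	/* ... mode set ... */
	kfree(pipe_config);				/* caller owns the kzalloc'd config */
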
 
@@ -7673,12 +7792,29 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
                            base.head) \
                if (mask & (1 <<(intel_crtc)->pipe)) \
 
+static bool
+intel_pipe_config_compare(struct intel_crtc_config *current_config,
+                         struct intel_crtc_config *pipe_config)
+{
+       if (current_config->has_pch_encoder != pipe_config->has_pch_encoder) {
+               DRM_ERROR("mismatch in has_pch_encoder "
+                         "(expected %i, found %i)\n",
+                         current_config->has_pch_encoder,
+                         pipe_config->has_pch_encoder);
+               return false;
+       }
+
+       return true;
+}
+
 void
 intel_modeset_check_state(struct drm_device *dev)
 {
+       drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
+       struct intel_crtc_config pipe_config;
 
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
@@ -7767,7 +7903,16 @@ intel_modeset_check_state(struct drm_device *dev)
                     "crtc's computed enabled state doesn't match tracked enabled state "
                     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
 
-               assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
+               memset(&pipe_config, 0, sizeof(pipe_config));
+               active = dev_priv->display.get_pipe_config(crtc,
+                                                          &pipe_config);
+               WARN(crtc->active != active,
+                    "crtc active state doesn't match with hw state "
+                    "(expected %i, found %i)\n", crtc->active, active);
+
+               WARN(active &&
+                    !intel_pipe_config_compare(&crtc->config, &pipe_config),
+                    "pipe state doesn't match!\n");
        }
 }
 
@@ -7777,7 +7922,8 @@ int intel_set_mode(struct drm_crtc *crtc,
 {
        struct drm_device *dev = crtc->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
+       struct drm_display_mode *saved_mode, *saved_hwmode;
+       struct intel_crtc_config *pipe_config = NULL;
        struct intel_crtc *intel_crtc;
        unsigned disable_pipes, prepare_pipes, modeset_pipes;
        int ret = 0;
@@ -7790,12 +7936,6 @@ int intel_set_mode(struct drm_crtc *crtc,
        intel_modeset_affected_pipes(crtc, &modeset_pipes,
                                     &prepare_pipes, &disable_pipes);
 
-       DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
-                     modeset_pipes, prepare_pipes, disable_pipes);
-
-       for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
-               intel_crtc_disable(&intel_crtc->base);
-
        *saved_hwmode = crtc->hwmode;
        *saved_mode = crtc->mode;
 
@@ -7804,15 +7944,22 @@ int intel_set_mode(struct drm_crtc *crtc,
         * Hence simply check whether any bit is set in modeset_pipes in all the
         * pieces of code that are not yet converted to deal with multiple crtcs
         * changing their mode at the same time. */
-       adjusted_mode = NULL;
        if (modeset_pipes) {
-               adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
-               if (IS_ERR(adjusted_mode)) {
-                       ret = PTR_ERR(adjusted_mode);
+               pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
+               if (IS_ERR(pipe_config)) {
+                       ret = PTR_ERR(pipe_config);
+                       pipe_config = NULL;
+
                        goto out;
                }
        }
 
+       DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+                     modeset_pipes, prepare_pipes, disable_pipes);
+
+       for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+               intel_crtc_disable(&intel_crtc->base);
+
        for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
                if (intel_crtc->base.enabled)
                        dev_priv->display.crtc_disable(&intel_crtc->base);
@@ -7821,8 +7968,12 @@ int intel_set_mode(struct drm_crtc *crtc,
        /* crtc->mode is already used by the ->mode_set callbacks, hence we need
         * to set it here already despite that we pass it down the callchain.
         */
-       if (modeset_pipes)
+       if (modeset_pipes) {
                crtc->mode = *mode;
+               /* mode_set/enable/disable functions rely on a correct pipe
+                * config. */
+               to_intel_crtc(crtc)->config = *pipe_config;
+       }
 
        /* Only after disabling all output pipelines that will be changed can we
         * update the output configuration. */
@@ -7836,7 +7987,6 @@ int intel_set_mode(struct drm_crtc *crtc,
         */
        for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
                ret = intel_crtc_mode_set(&intel_crtc->base,
-                                         mode, adjusted_mode,
                                          x, y, fb);
                if (ret)
                        goto done;
@@ -7848,7 +7998,7 @@ int intel_set_mode(struct drm_crtc *crtc,
 
        if (modeset_pipes) {
                /* Store real post-adjustment hardware mode. */
-               crtc->hwmode = *adjusted_mode;
+               crtc->hwmode = pipe_config->adjusted_mode;
 
                /* Calculate and store various constants which
                 * are later needed by vblank and swap-completion
@@ -7859,7 +8009,6 @@ int intel_set_mode(struct drm_crtc *crtc,
 
        /* FIXME: add subpixel order */
 done:
-       drm_mode_destroy(dev, adjusted_mode);
        if (ret && crtc->enabled) {
                crtc->hwmode = *saved_hwmode;
                crtc->mode = *saved_mode;
@@ -7868,6 +8017,7 @@ done:
        }
 
 out:
+       kfree(pipe_config);
        kfree(saved_mode);
        return ret;
 }
@@ -7959,10 +8109,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
                        config->mode_changed = true;
                } else if (set->fb == NULL) {
                        config->mode_changed = true;
-               } else if (set->fb->depth != set->crtc->fb->depth) {
-                       config->mode_changed = true;
-               } else if (set->fb->bits_per_pixel !=
-                          set->crtc->fb->bits_per_pixel) {
+               } else if (set->fb->pixel_format !=
+                          set->crtc->fb->pixel_format) {
                        config->mode_changed = true;
                } else
                        config->fb_changed = true;
@@ -8145,6 +8293,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
                        goto fail;
                }
        } else if (config->fb_changed) {
+               intel_crtc_wait_for_pending_flips(set->crtc);
+
                ret = intel_pipe_set_base(set->crtc,
                                          set->x, set->y, set->fb);
        }
@@ -8232,8 +8382,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
        dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-       intel_crtc->bpp = 24; /* default for pre-Ironlake */
-
        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 }
 
@@ -8343,20 +8491,20 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (has_edp_a(dev))
                        intel_dp_init(dev, DP_A, PORT_A);
 
-               if (I915_READ(HDMIB) & PORT_DETECTED) {
+               if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev, PCH_SDVOB, true);
                        if (!found)
-                               intel_hdmi_init(dev, HDMIB, PORT_B);
+                               intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev, PCH_DP_B, PORT_B);
                }
 
-               if (I915_READ(HDMIC) & PORT_DETECTED)
-                       intel_hdmi_init(dev, HDMIC, PORT_C);
+               if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+                       intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
 
-               if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
-                       intel_hdmi_init(dev, HDMID, PORT_D);
+               if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+                       intel_hdmi_init(dev, PCH_HDMID, PORT_D);
 
                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev, PCH_DP_C, PORT_C);
@@ -8368,24 +8516,21 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
                        intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
 
-               if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) {
-                       intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B);
+               if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+                       intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
+                                       PORT_B);
                        if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
                                intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
                }
-
-               if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
-                       intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
-
        } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
                bool found = false;
 
-               if (I915_READ(SDVOB) & SDVO_DETECTED) {
+               if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
-                       found = intel_sdvo_init(dev, SDVOB, true);
+                       found = intel_sdvo_init(dev, GEN3_SDVOB, true);
                        if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
-                               intel_hdmi_init(dev, SDVOB, PORT_B);
+                               intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
                        }
 
                        if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
@@ -8396,16 +8541,16 @@ static void intel_setup_outputs(struct drm_device *dev)
 
                /* Before G4X SDVOC doesn't have its own detect register */
 
-               if (I915_READ(SDVOB) & SDVO_DETECTED) {
+               if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
-                       found = intel_sdvo_init(dev, SDVOC, false);
+                       found = intel_sdvo_init(dev, GEN3_SDVOC, false);
                }
 
-               if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
+               if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
 
                        if (SUPPORTS_INTEGRATED_HDMI(dev)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
-                               intel_hdmi_init(dev, SDVOC, PORT_C);
+                               intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
                        }
                        if (SUPPORTS_INTEGRATED_DP(dev)) {
                                DRM_DEBUG_KMS("probing DP_C\n");
@@ -8572,20 +8717,22 @@ static void intel_init_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       /* We always want a DPMS function */
        if (HAS_DDI(dev)) {
+               dev_priv->display.get_pipe_config = haswell_get_pipe_config;
                dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
                dev_priv->display.off = haswell_crtc_off;
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
+               dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
                dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
                dev_priv->display.off = ironlake_crtc_off;
                dev_priv->display.update_plane = ironlake_update_plane;
        } else {
+               dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -8828,7 +8975,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
 void intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int i, ret;
+       int i, j, ret;
 
        drm_mode_config_init(dev);
 
@@ -8859,13 +9006,17 @@ void intel_modeset_init(struct drm_device *dev)
        dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
 
        DRM_DEBUG_KMS("%d display pipe%s available.\n",
-                     dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
+                     INTEL_INFO(dev)->num_pipes,
+                     INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
 
-       for (i = 0; i < dev_priv->num_pipe; i++) {
+       for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
                intel_crtc_init(dev, i);
-               ret = intel_plane_init(dev, i);
-               if (ret)
-                       DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
+               for (j = 0; j < dev_priv->num_plane; j++) {
+                       ret = intel_plane_init(dev, i, j);
+                       if (ret)
+                               DRM_DEBUG_KMS("pipe %d plane %d init failed: %d\n",
+                                             i, j, ret);
+               }
        }
 
        intel_cpu_pll_init(dev);
@@ -8918,10 +9069,11 @@ static void intel_enable_pipe_a(struct drm_device *dev)
 static bool
 intel_check_plane_mapping(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg, val;
 
-       if (dev_priv->num_pipe == 1)
+       if (INTEL_INFO(dev)->num_pipes == 1)
                return true;
 
        reg = DSPCNTR(!crtc->plane);
@@ -9077,6 +9229,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        u32 tmp;
+       struct drm_plane *plane;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
@@ -9096,6 +9249,13 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                        case TRANS_DDI_EDP_INPUT_C_ONOFF:
                                pipe = PIPE_C;
                                break;
+                       default:
+                               /* A bogus value has been programmed, disable
+                                * the transcoder */
+                               WARN(1, "Bogus eDP source %08x\n", tmp);
+                               intel_ddi_disable_transcoder_func(dev_priv,
+                                               TRANSCODER_EDP);
+                               goto setup_pipes;
                        }
 
                        crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -9106,14 +9266,12 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                }
        }
 
-       for_each_pipe(pipe) {
-               crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
-               tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
-               if (tmp & PIPECONF_ENABLE)
-                       crtc->active = true;
-               else
-                       crtc->active = false;
+setup_pipes:
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               memset(&crtc->config, 0, sizeof(crtc->config));
+               crtc->active = dev_priv->display.get_pipe_config(crtc,
+                                                                &crtc->config);
 
                crtc->base.enabled = crtc->active;
 
@@ -9173,8 +9331,12 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 
        if (force_restore) {
                for_each_pipe(pipe) {
-                       intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
+                       struct drm_crtc *crtc =
+                               dev_priv->pipe_to_crtc_mapping[pipe];
+                       intel_crtc_restore_mode(crtc);
                }
+               list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+                       intel_plane_restore(plane);
 
                i915_redisable_vga(dev);
        } else {
@@ -9323,15 +9485,24 @@ intel_display_capture_error_state(struct drm_device *dev)
        for_each_pipe(i) {
                cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
 
-               error->cursor[i].control = I915_READ(CURCNTR(i));
-               error->cursor[i].position = I915_READ(CURPOS(i));
-               error->cursor[i].base = I915_READ(CURBASE(i));
+               if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
+                       error->cursor[i].control = I915_READ(CURCNTR(i));
+                       error->cursor[i].position = I915_READ(CURPOS(i));
+                       error->cursor[i].base = I915_READ(CURBASE(i));
+               } else {
+                       error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
+                       error->cursor[i].position = I915_READ(CURPOS_IVB(i));
+                       error->cursor[i].base = I915_READ(CURBASE_IVB(i));
+               }
 
                error->plane[i].control = I915_READ(DSPCNTR(i));
                error->plane[i].stride = I915_READ(DSPSTRIDE(i));
-               error->plane[i].size = I915_READ(DSPSIZE(i));
-               error->plane[i].pos = I915_READ(DSPPOS(i));
-               error->plane[i].addr = I915_READ(DSPADDR(i));
+               if (INTEL_INFO(dev)->gen <= 3) {
+                       error->plane[i].size = I915_READ(DSPSIZE(i));
+                       error->plane[i].pos = I915_READ(DSPPOS(i));
+               }
+               if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+                       error->plane[i].addr = I915_READ(DSPADDR(i));
                if (INTEL_INFO(dev)->gen >= 4) {
                        error->plane[i].surface = I915_READ(DSPSURF(i));
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
@@ -9355,10 +9526,9 @@ intel_display_print_error_state(struct seq_file *m,
                                struct drm_device *dev,
                                struct intel_display_error_state *error)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
 
-       seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
+       seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
        for_each_pipe(i) {
                seq_printf(m, "Pipe [%d]:\n", i);
                seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
@@ -9373,9 +9543,12 @@ intel_display_print_error_state(struct seq_file *m,
                seq_printf(m, "Plane [%d]:\n", i);
                seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
                seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
-               seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
-               seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
-               seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
+               if (INTEL_INFO(dev)->gen <= 3) {
+                       seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
+                       seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
+               }
+               if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+                       seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
                if (INTEL_INFO(dev)->gen >= 4) {
                        seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
                        seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
index d7d4afe013417f489934dbab8e5bafbf48614aa9..b30e82b984399bade748e3bf739d0881ac59498f 100644 (file)
@@ -109,29 +109,6 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
 
 static void intel_dp_link_down(struct intel_dp *intel_dp);
 
-void
-intel_edp_link_config(struct intel_encoder *intel_encoder,
-                      int *lane_num, int *link_bw)
-{
-       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
-
-       *lane_num = intel_dp->lane_count;
-       *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
-}
-
-int
-intel_edp_target_clock(struct intel_encoder *intel_encoder,
-                      struct drm_display_mode *mode)
-{
-       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
-       struct intel_connector *intel_connector = intel_dp->attached_connector;
-
-       if (intel_connector->panel.fixed_mode)
-               return intel_connector->panel.fixed_mode->clock;
-       else
-               return mode->clock;
-}
-
 static int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
@@ -177,34 +154,6 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
        return (max_link_clock * max_lanes * 8) / 10;
 }
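/*
 * Illustrative sketch (not part of the patch): the "* 8) / 10" above is the
 * 8b/10b channel-coding overhead -- only 8 of every 10 bits on the wire
 * carry payload. A self-contained version of the same feasibility check,
 * using plain Mbit/s units; all names below are hypothetical.
 */
#include <stdbool.h>

static bool example_dp_mode_fits(int pixel_clock_khz, int bpp,
                                 int link_rate_mbps_per_lane, int lanes)
{
        /* pixel bandwidth the mode needs, in Mbit/s */
        long need  = (long)pixel_clock_khz * bpp / 1000;
        /* usable link bandwidth after 8b/10b overhead, in Mbit/s */
        long avail = (long)link_rate_mbps_per_lane * lanes * 8 / 10;

        /* e.g. 148500 kHz (1080p60) at 24 bpp needs 3564 Mbit/s;
         * HBR (2700 Mbit/s per lane) x 2 lanes provides 4320 Mbit/s. */
        return need <= avail;
}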
 
-static bool
-intel_dp_adjust_dithering(struct intel_dp *intel_dp,
-                         struct drm_display_mode *mode,
-                         bool adjust_mode)
-{
-       int max_link_clock =
-               drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
-       int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
-       int max_rate, mode_rate;
-
-       mode_rate = intel_dp_link_required(mode->clock, 24);
-       max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
-
-       if (mode_rate > max_rate) {
-               mode_rate = intel_dp_link_required(mode->clock, 18);
-               if (mode_rate > max_rate)
-                       return false;
-
-               if (adjust_mode)
-                       mode->private_flags
-                               |= INTEL_MODE_DP_FORCE_6BPC;
-
-               return true;
-       }
-
-       return true;
-}
-
 static int
 intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
@@ -212,6 +161,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+       int target_clock = mode->clock;
+       int max_rate, mode_rate, max_lanes, max_link_clock;
 
        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
@@ -219,9 +170,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
 
                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;
+
+               target_clock = fixed_mode->clock;
        }
 
-       if (!intel_dp_adjust_dithering(intel_dp, mode, false))
+       max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
+       max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+
+       max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+       mode_rate = intel_dp_link_required(target_clock, 18);
+
+       if (mode_rate > max_rate)
                return MODE_CLOCK_HIGH;
 
        if (mode->clock < 10000)
@@ -294,16 +253,20 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pp_stat_reg;
 
-       return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
+       pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+       return (I915_READ(pp_stat_reg) & PP_ON) != 0;
 }
 
 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pp_ctrl_reg;
 
-       return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
+       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+       return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
 }
 
 static void
@@ -311,14 +274,19 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pp_stat_reg, pp_ctrl_reg;
 
        if (!is_edp(intel_dp))
                return;
+
+       pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
        if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
-                             I915_READ(PCH_PP_STATUS),
-                             I915_READ(PCH_PP_CONTROL));
+                               I915_READ(pp_stat_reg),
+                               I915_READ(pp_ctrl_reg));
        }
 }
 
@@ -328,29 +296,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t ch_ctl = intel_dp->output_reg + 0x10;
+       uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;
 
-       if (IS_HASWELL(dev)) {
-               switch (intel_dig_port->port) {
-               case PORT_A:
-                       ch_ctl = DPA_AUX_CH_CTL;
-                       break;
-               case PORT_B:
-                       ch_ctl = PCH_DPB_AUX_CH_CTL;
-                       break;
-               case PORT_C:
-                       ch_ctl = PCH_DPC_AUX_CH_CTL;
-                       break;
-               case PORT_D:
-                       ch_ctl = PCH_DPD_AUX_CH_CTL;
-                       break;
-               default:
-                       BUG();
-               }
-       }
-
 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
@@ -370,11 +319,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
 {
-       uint32_t output_reg = intel_dp->output_reg;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t ch_ctl = output_reg + 0x10;
+       uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t ch_data = ch_ctl + 4;
        int i, ret, recv_bytes;
        uint32_t status;
@@ -388,29 +336,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
         */
        pm_qos_update_request(&dev_priv->pm_qos, 0);
 
-       if (IS_HASWELL(dev)) {
-               switch (intel_dig_port->port) {
-               case PORT_A:
-                       ch_ctl = DPA_AUX_CH_CTL;
-                       ch_data = DPA_AUX_CH_DATA1;
-                       break;
-               case PORT_B:
-                       ch_ctl = PCH_DPB_AUX_CH_CTL;
-                       ch_data = PCH_DPB_AUX_CH_DATA1;
-                       break;
-               case PORT_C:
-                       ch_ctl = PCH_DPC_AUX_CH_CTL;
-                       ch_data = PCH_DPC_AUX_CH_DATA1;
-                       break;
-               case PORT_D:
-                       ch_ctl = PCH_DPD_AUX_CH_CTL;
-                       ch_data = PCH_DPD_AUX_CH_DATA1;
-                       break;
-               default:
-                       BUG();
-               }
-       }
-
        intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk,
         * and would like to run at 2MHz. So, take the
@@ -732,18 +657,26 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
 }
 
 bool
-intel_dp_mode_fixup(struct drm_encoder *encoder,
-                   const struct drm_display_mode *mode,
-                   struct drm_display_mode *adjusted_mode)
+intel_dp_compute_config(struct intel_encoder *encoder,
+                       struct intel_crtc_config *pipe_config)
 {
-       struct drm_device *dev = encoder->dev;
-       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+       struct drm_display_mode *mode = &pipe_config->requested_mode;
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
        int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
        int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
        int bpp, mode_rate;
        static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+       int target_clock, link_avail, link_clock;
+
+       if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
+               pipe_config->has_pch_encoder = true;
+
+       pipe_config->has_dp_encoder = true;
 
        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -752,6 +685,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
                                        intel_connector->panel.fitting_mode,
                                        mode, adjusted_mode);
        }
+       /* We need to take the panel's fixed mode into account. */
+       target_clock = adjusted_mode->clock;
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return false;
@@ -760,11 +695,28 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
                      "max bw %02x pixel clock %iKHz\n",
                      max_lane_count, bws[max_clock], adjusted_mode->clock);
 
-       if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
-               return false;
+       /* Walk through all bpp values. Luckily they're all nicely spaced, with 2
+        * bpc between each step. */
+       bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
+       for (; bpp >= 6*3; bpp -= 2*3) {
+               mode_rate = intel_dp_link_required(target_clock, bpp);
+
+               for (clock = 0; clock <= max_clock; clock++) {
+                       for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+                               link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
+                               link_avail = intel_dp_max_data_rate(link_clock,
+                                                                   lane_count);
+
+                               if (mode_rate <= link_avail) {
+                                       goto found;
+                               }
+                       }
+               }
+       }
 
-       bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+       return false;
 
+found:
        if (intel_dp->color_range_auto) {
                /*
                 * See:
@@ -778,104 +730,38 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
        }
 
        if (intel_dp->color_range)
-               adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
-
-       mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
-
-       for (clock = 0; clock <= max_clock; clock++) {
-               for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
-                       int link_bw_clock =
-                               drm_dp_bw_code_to_link_rate(bws[clock]);
-                       int link_avail = intel_dp_max_data_rate(link_bw_clock,
-                                                               lane_count);
-
-                       if (mode_rate <= link_avail) {
-                               intel_dp->link_bw = bws[clock];
-                               intel_dp->lane_count = lane_count;
-                               adjusted_mode->clock = link_bw_clock;
-                               DRM_DEBUG_KMS("DP link bw %02x lane "
-                                               "count %d clock %d bpp %d\n",
-                                      intel_dp->link_bw, intel_dp->lane_count,
-                                      adjusted_mode->clock, bpp);
-                               DRM_DEBUG_KMS("DP link bw required %i available %i\n",
-                                             mode_rate, link_avail);
-                               return true;
-                       }
-               }
-       }
-
-       return false;
-}
+               pipe_config->limited_color_range = true;
 
-void
-intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
-                struct drm_display_mode *adjusted_mode)
-{
-       struct drm_device *dev = crtc->dev;
-       struct intel_encoder *intel_encoder;
-       struct intel_dp *intel_dp;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int lane_count = 4;
-       struct intel_link_m_n m_n;
-       int pipe = intel_crtc->pipe;
-       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
-       int target_clock;
+       intel_dp->link_bw = bws[clock];
+       intel_dp->lane_count = lane_count;
+       adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+       pipe_config->pixel_target_clock = target_clock;
 
-       /*
-        * Find the lane count in the intel_encoder private
-        */
-       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-               intel_dp = enc_to_intel_dp(&intel_encoder->base);
+       DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
+                     intel_dp->link_bw, intel_dp->lane_count,
+                     adjusted_mode->clock, bpp);
+       DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+                     mode_rate, link_avail);
 
-               if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-                   intel_encoder->type == INTEL_OUTPUT_EDP)
-               {
-                       lane_count = intel_dp->lane_count;
-                       break;
-               }
-       }
-
-       target_clock = mode->clock;
-       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-               if (intel_encoder->type == INTEL_OUTPUT_EDP) {
-                       target_clock = intel_edp_target_clock(intel_encoder,
-                                                             mode);
-                       break;
-               }
-       }
+       intel_link_compute_m_n(bpp, lane_count,
+                              target_clock, adjusted_mode->clock,
+                              &pipe_config->dp_m_n);
 
        /*
-        * Compute the GMCH and Link ratios. The '3' here is
-        * the number of bytes_per_pixel post-LUT, which we always
-        * set up for 8-bits of R/G/B, or 3 bytes total.
+        * XXX: We have a strange regression where using the vbt edp bpp value
+        * for the link bw computation results in black screens; the panel only
+        * works when we do the computation at the usual 24bpp (but still
+        * requires us to use 18bpp). Until that's fully debugged, stay
+        * bug-for-bug compatible with the old code.
         */
-       intel_link_compute_m_n(intel_crtc->bpp, lane_count,
-                              target_clock, adjusted_mode->clock, &m_n);
-
-       if (IS_HASWELL(dev)) {
-               I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
-                          TU_SIZE(m_n.tu) | m_n.gmch_m);
-               I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
-               I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
-               I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
-       } else if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
-               I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
-               I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
-               I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
-       } else if (IS_VALLEYVIEW(dev)) {
-               I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
-               I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
-               I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
-               I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
-       } else {
-               I915_WRITE(PIPE_GMCH_DATA_M(pipe),
-                          TU_SIZE(m_n.tu) | m_n.gmch_m);
-               I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
-               I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
-               I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+       if (is_edp(intel_dp) && dev_priv->edp.bpp) {
+               DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
+                             bpp, dev_priv->edp.bpp);
+               bpp = min_t(int, bpp, dev_priv->edp.bpp);
        }
+       pipe_config->pipe_bpp = bpp;
+
+       return true;
 }
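/*
 * Illustrative sketch (not part of the patch): intel_dp_compute_config()
 * above walks bpp from high to low in 2 bpc steps and, for each bpp, picks
 * the first link rate / lane count combination with enough bandwidth. A
 * stripped-down standalone version of that search; the names and the
 * Mbit/s units are hypothetical. A caller would pass rates of 1620 and
 * 2700 Mbit/s and max_lanes up to 4, mirroring the bws[] table above.
 */
#include <stdbool.h>

struct example_dp_config {
        int bpp;
        int link_rate_mbps;     /* per lane */
        int lanes;
};

static bool example_pick_dp_config(int pixel_clock_khz, int max_bpp,
                                   const int *rates_mbps, int num_rates,
                                   int max_lanes,
                                   struct example_dp_config *out)
{
        for (int bpp = max_bpp; bpp >= 18; bpp -= 6) {  /* step down 2 bpc at a time */
                long need = (long)pixel_clock_khz * bpp / 1000;

                for (int r = 0; r < num_rates; r++) {
                        for (int lanes = 1; lanes <= max_lanes; lanes <<= 1) {
                                long avail = (long)rates_mbps[r] * lanes * 8 / 10;

                                if (need <= avail) {
                                        out->bpp = bpp;
                                        out->link_rate_mbps = rates_mbps[r];
                                        out->lanes = lanes;
                                        return true;
                                }
                        }
                }
        }
        return false;   /* the mode does not fit at any supported bpp */
}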
 
 void intel_dp_init_link_config(struct intel_dp *intel_dp)
@@ -994,7 +880,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                else
                        intel_dp->DP |= DP_PLL_FREQ_270MHZ;
        } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
-               if (!HAS_PCH_SPLIT(dev))
+               if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
                        intel_dp->DP |= intel_dp->color_range;
 
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1009,7 +895,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                if (intel_crtc->pipe == 1)
                        intel_dp->DP |= DP_PIPEB_SELECT;
 
-               if (is_cpu_edp(intel_dp)) {
+               if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                        /* don't miss out required setting for eDP */
                        if (adjusted_mode->clock < 200000)
                                intel_dp->DP |= DP_PLL_FREQ_160MHZ;
@@ -1020,7 +906,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
        }
 
-       if (is_cpu_edp(intel_dp))
+       if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
                ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 }
 
@@ -1039,16 +925,20 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pp_stat_reg, pp_ctrl_reg;
+
+       pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
 
        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
-                     mask, value,
-                     I915_READ(PCH_PP_STATUS),
-                     I915_READ(PCH_PP_CONTROL));
+                       mask, value,
+                       I915_READ(pp_stat_reg),
+                       I915_READ(pp_ctrl_reg));
 
-       if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
+       if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
-                         I915_READ(PCH_PP_STATUS),
-                         I915_READ(PCH_PP_CONTROL));
+                               I915_READ(pp_stat_reg),
+                               I915_READ(pp_ctrl_reg));
        }
 }
 
@@ -1075,9 +965,15 @@ static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
  * is locked
  */
 
-static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
+static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
 {
-       u32     control = I915_READ(PCH_PP_CONTROL);
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 control;
+       u32 pp_ctrl_reg;
+
+       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+       control = I915_READ(pp_ctrl_reg);
 
        control &= ~PANEL_UNLOCK_MASK;
        control |= PANEL_UNLOCK_REGS;
@@ -1089,6 +985,7 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
+       u32 pp_stat_reg, pp_ctrl_reg;
 
        if (!is_edp(intel_dp))
                return;
@@ -1107,13 +1004,16 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
        if (!ironlake_edp_have_panel_power(intel_dp))
                ironlake_wait_panel_power_cycle(intel_dp);
 
-       pp = ironlake_get_pp_control(dev_priv);
+       pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;
-       I915_WRITE(PCH_PP_CONTROL, pp);
-       POSTING_READ(PCH_PP_CONTROL);
-       DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
-                     I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
 
+       pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+       I915_WRITE(pp_ctrl_reg, pp);
+       POSTING_READ(pp_ctrl_reg);
+       DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+                       I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
@@ -1128,19 +1028,23 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
+       u32 pp_stat_reg, pp_ctrl_reg;
 
        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
        if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
-               pp = ironlake_get_pp_control(dev_priv);
+               pp = ironlake_get_pp_control(intel_dp);
                pp &= ~EDP_FORCE_VDD;
-               I915_WRITE(PCH_PP_CONTROL, pp);
-               POSTING_READ(PCH_PP_CONTROL);
 
-               /* Make sure sequencer is idle before allowing subsequent activity */
-               DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
-                             I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
+               pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+               pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+               I915_WRITE(pp_ctrl_reg, pp);
+               POSTING_READ(pp_ctrl_reg);
 
+               /* Make sure sequencer is idle before allowing subsequent activity */
+               DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+               I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
                msleep(intel_dp->panel_power_down_delay);
        }
 }
@@ -1184,6 +1088,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
+       u32 pp_ctrl_reg;
 
        if (!is_edp(intel_dp))
                return;
@@ -1197,7 +1102,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 
        ironlake_wait_panel_power_cycle(intel_dp);
 
-       pp = ironlake_get_pp_control(dev_priv);
+       pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
@@ -1209,8 +1114,10 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;
 
-       I915_WRITE(PCH_PP_CONTROL, pp);
-       POSTING_READ(PCH_PP_CONTROL);
+       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+       I915_WRITE(pp_ctrl_reg, pp);
+       POSTING_READ(pp_ctrl_reg);
 
        ironlake_wait_panel_on(intel_dp);
 
@@ -1226,6 +1133,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
+       u32 pp_ctrl_reg;
 
        if (!is_edp(intel_dp))
                return;
@@ -1234,12 +1142,15 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
        WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
-       pp = ironlake_get_pp_control(dev_priv);
+       pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
-       I915_WRITE(PCH_PP_CONTROL, pp);
-       POSTING_READ(PCH_PP_CONTROL);
+
+       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+       I915_WRITE(pp_ctrl_reg, pp);
+       POSTING_READ(pp_ctrl_reg);
 
        intel_dp->want_panel_vdd = false;
 
@@ -1253,6 +1164,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
        u32 pp;
+       u32 pp_ctrl_reg;
 
        if (!is_edp(intel_dp))
                return;
@@ -1265,10 +1177,13 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
         * allowing it to appear.
         */
        msleep(intel_dp->backlight_on_delay);
-       pp = ironlake_get_pp_control(dev_priv);
+       pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;
-       I915_WRITE(PCH_PP_CONTROL, pp);
-       POSTING_READ(PCH_PP_CONTROL);
+
+       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+       I915_WRITE(pp_ctrl_reg, pp);
+       POSTING_READ(pp_ctrl_reg);
 
        intel_panel_enable_backlight(dev, pipe);
 }
@@ -1278,6 +1193,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
+       u32 pp_ctrl_reg;
 
        if (!is_edp(intel_dp))
                return;
@@ -1285,10 +1201,13 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
        intel_panel_disable_backlight(dev);
 
        DRM_DEBUG_KMS("\n");
-       pp = ironlake_get_pp_control(dev_priv);
+       pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;
-       I915_WRITE(PCH_PP_CONTROL, pp);
-       POSTING_READ(PCH_PP_CONTROL);
+
+       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+       I915_WRITE(pp_ctrl_reg, pp);
+       POSTING_READ(pp_ctrl_reg);
        msleep(intel_dp->backlight_off_delay);
 }
 
@@ -1384,7 +1303,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
        if (!(tmp & DP_PORT_EN))
                return false;
 
-       if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+       if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
                *pipe = PORT_TO_PIPE(tmp);
@@ -1441,10 +1360,12 @@ static void intel_disable_dp(struct intel_encoder *encoder)
 static void intel_post_disable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct drm_device *dev = encoder->base.dev;
 
        if (is_cpu_edp(intel_dp)) {
                intel_dp_link_down(intel_dp);
-               ironlake_edp_pll_off(intel_dp);
+               if (!IS_VALLEYVIEW(dev))
+                       ironlake_edp_pll_off(intel_dp);
        }
 }
 
@@ -1470,8 +1391,9 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 static void intel_pre_enable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct drm_device *dev = encoder->base.dev;
 
-       if (is_cpu_edp(intel_dp))
+       if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
                ironlake_edp_pll_on(intel_dp);
 }
 
@@ -1548,7 +1470,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-       if (IS_HASWELL(dev)) {
+       if (HAS_DDI(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -1756,7 +1678,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
        uint32_t signal_levels, mask;
        uint8_t train_set = intel_dp->train_set[0];
 
-       if (IS_HASWELL(dev)) {
+       if (HAS_DDI(dev)) {
                signal_levels = intel_hsw_signal_levels(train_set);
                mask = DDI_BUF_EMP_MASK;
        } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
@@ -1787,7 +1709,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
        int ret;
        uint32_t temp;
 
-       if (IS_HASWELL(dev)) {
+       if (HAS_DDI(dev)) {
                temp = I915_READ(DP_TP_CTL(port));
 
                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
@@ -2311,6 +2233,16 @@ g4x_dp_detect(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        uint32_t bit;
 
+       /* Can't disconnect eDP, but you can close the lid... */
+       if (is_edp(intel_dp)) {
+               enum drm_connector_status status;
+
+               status = intel_panel_detect(dev);
+               if (status == connector_status_unknown)
+                       status = connector_status_connected;
+               return status;
+       }
+
        switch (intel_dig_port->port) {
        case PORT_B:
                bit = PORTB_HOTPLUG_LIVE_STATUS;
@@ -2559,18 +2491,20 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 {
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
        i2c_del_adapter(&intel_dp->adapter);
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+               mutex_lock(&dev->mode_config.mutex);
                ironlake_panel_vdd_off_sync(intel_dp);
+               mutex_unlock(&dev->mode_config.mutex);
        }
        kfree(intel_dig_port);
 }
 
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
-       .mode_fixup = intel_dp_mode_fixup,
        .mode_set = intel_dp_mode_set,
 };
 
@@ -2666,15 +2600,28 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec, final;
        u32 pp_on, pp_off, pp_div, pp;
+       int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+
+       if (HAS_PCH_SPLIT(dev)) {
+               pp_control_reg = PCH_PP_CONTROL;
+               pp_on_reg = PCH_PP_ON_DELAYS;
+               pp_off_reg = PCH_PP_OFF_DELAYS;
+               pp_div_reg = PCH_PP_DIVISOR;
+       } else {
+               pp_control_reg = PIPEA_PP_CONTROL;
+               pp_on_reg = PIPEA_PP_ON_DELAYS;
+               pp_off_reg = PIPEA_PP_OFF_DELAYS;
+               pp_div_reg = PIPEA_PP_DIVISOR;
+       }
 
        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
-       pp = ironlake_get_pp_control(dev_priv);
-       I915_WRITE(PCH_PP_CONTROL, pp);
+       pp = ironlake_get_pp_control(intel_dp);
+       I915_WRITE(pp_control_reg, pp);
 
-       pp_on = I915_READ(PCH_PP_ON_DELAYS);
-       pp_off = I915_READ(PCH_PP_OFF_DELAYS);
-       pp_div = I915_READ(PCH_PP_DIVISOR);
+       pp_on = I915_READ(pp_on_reg);
+       pp_off = I915_READ(pp_off_reg);
+       pp_div = I915_READ(pp_div_reg);
 
        /* Pull timing values out of registers */
        cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
@@ -2749,7 +2696,22 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct edp_power_seq *seq)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pp_on, pp_off, pp_div;
+       u32 pp_on, pp_off, pp_div, port_sel = 0;
+       int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
+       int pp_on_reg, pp_off_reg, pp_div_reg;
+
+       if (HAS_PCH_SPLIT(dev)) {
+               pp_on_reg = PCH_PP_ON_DELAYS;
+               pp_off_reg = PCH_PP_OFF_DELAYS;
+               pp_div_reg = PCH_PP_DIVISOR;
+       } else {
+               pp_on_reg = PIPEA_PP_ON_DELAYS;
+               pp_off_reg = PIPEA_PP_OFF_DELAYS;
+               pp_div_reg = PIPEA_PP_DIVISOR;
+       }
+
+       if (IS_VALLEYVIEW(dev))
+               port_sel = I915_READ(pp_on_reg) & 0xc0000000;
 
        /* And finally store the new values in the power sequencer. */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
@@ -2758,8 +2720,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
-       pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
-                       << PP_REFERENCE_DIVIDER_SHIFT;
+       pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
        pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                        << PANEL_POWER_CYCLE_DELAY_SHIFT);
 
@@ -2767,19 +2728,21 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
         * power sequencer any more. */
        if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                if (is_cpu_edp(intel_dp))
-                       pp_on |= PANEL_POWER_PORT_DP_A;
+                       port_sel = PANEL_POWER_PORT_DP_A;
                else
-                       pp_on |= PANEL_POWER_PORT_DP_D;
+                       port_sel = PANEL_POWER_PORT_DP_D;
        }
 
-       I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
-       I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
-       I915_WRITE(PCH_PP_DIVISOR, pp_div);
+       pp_on |= port_sel;
+
+       I915_WRITE(pp_on_reg, pp_on);
+       I915_WRITE(pp_off_reg, pp_off);
+       I915_WRITE(pp_div_reg, pp_div);
 
        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
-                     I915_READ(PCH_PP_ON_DELAYS),
-                     I915_READ(PCH_PP_OFF_DELAYS),
-                     I915_READ(PCH_PP_DIVISOR));
+                     I915_READ(pp_on_reg),
+                     I915_READ(pp_off_reg),
+                     I915_READ(pp_div_reg));
 }
 
 void
@@ -2841,27 +2804,46 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
 
+       intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
+       if (HAS_DDI(dev)) {
+               switch (intel_dig_port->port) {
+               case PORT_A:
+                       intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
+                       break;
+               case PORT_B:
+                       intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
+                       break;
+               case PORT_C:
+                       intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
+                       break;
+               case PORT_D:
+                       intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
+                       break;
+               default:
+                       BUG();
+               }
+       }
 
        /* Set up the DDC bus. */
        switch (port) {
        case PORT_A:
+               intel_encoder->hpd_pin = HPD_PORT_A;
                name = "DPDDC-A";
                break;
        case PORT_B:
-               dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
+               intel_encoder->hpd_pin = HPD_PORT_B;
                name = "DPDDC-B";
                break;
        case PORT_C:
-               dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
+               intel_encoder->hpd_pin = HPD_PORT_C;
                name = "DPDDC-C";
                break;
        case PORT_D:
-               dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
+               intel_encoder->hpd_pin = HPD_PORT_D;
                name = "DPDDC-D";
                break;
        default:
-               WARN(1, "Invalid port %c\n", port_name(port));
-               break;
+               BUG();
        }
 
        if (is_edp(intel_dp))
@@ -2971,6 +2953,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
                         DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
 
+       intel_encoder->compute_config = intel_dp_compute_config;
        intel_encoder->enable = intel_enable_dp;
        intel_encoder->pre_enable = intel_pre_enable_dp;
        intel_encoder->disable = intel_disable_dp;
index 07ebac6fe8cace004082913a325da446ebb4b07d..d7bd031dd64279d9adac2c58cca95ad465cc902c 100644 (file)
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_dp_helper.h>
 
+/**
+ * _wait_for - magic (register) wait macro
+ *
+ * Does the right thing for modeset paths when run under kgdb or similar atomic
+ * contexts. Note that it's important that we check the condition again after
+ * having timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
 #define _wait_for(COND, MS, W) ({ \
-       unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
+       unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;   \
        int ret__ = 0;                                                  \
        while (!(COND)) {                                               \
                if (time_after(jiffies, timeout__)) {                   \
-                       ret__ = -ETIMEDOUT;                             \
+                       if (!(COND))                                    \
+                               ret__ = -ETIMEDOUT;                     \
                        break;                                          \
                }                                                       \
                if (W && drm_can_sleep())  {                            \
        ret__;                                                          \
 })
 
-#define wait_for_atomic_us(COND, US) ({ \
-       unsigned long timeout__ = jiffies + usecs_to_jiffies(US);       \
-       int ret__ = 0;                                                  \
-       while (!(COND)) {                                               \
-               if (time_after(jiffies, timeout__)) {                   \
-                       ret__ = -ETIMEDOUT;                             \
-                       break;                                          \
-               }                                                       \
-               cpu_relax();                                            \
-       }                                                               \
-       ret__;                                                          \
-})
-
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+#define wait_for_atomic_us(COND, US) _wait_for((COND), \
+                                              DIV_ROUND_UP((US), 1000), 0)
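/*
 * Illustrative arithmetic (not part of the patch): wait_for_atomic_us() now
 * reuses _wait_for() by rounding the microsecond budget up to whole
 * milliseconds, so even very short waits poll for at least 1 ms. The
 * EXAMPLE_* macro below simply restates the kernel's DIV_ROUND_UP() rounding
 * for illustration.
 */
#define EXAMPLE_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
_Static_assert(EXAMPLE_DIV_ROUND_UP(50, 1000) == 1, "a 50 us wait polls for up to 1 ms");
_Static_assert(EXAMPLE_DIV_ROUND_UP(2500, 1000) == 3, "2500 us rounds up to 3 ms");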
 
 #define KHz(x) (1000*x)
 #define MHz(x) KHz(1000*x)
 #define INTEL_DVO_CHIP_TMDS 2
 #define INTEL_DVO_CHIP_TVOUT 4
 
-/* drm_display_mode->private_flags */
-#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
-#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
-#define INTEL_MODE_DP_FORCE_6BPC (0x10)
-/* This flag must be set by the encoder's mode_fixup if it changes the crtc
- * timings in the mode to prevent the crtc fixup from overwriting them.
- * Currently only lvds needs that. */
-#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
-/*
- * Set when limited 16-235 (as opposed to full 0-255) RGB color range is
- * to be used.
- */
-#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40)
-
-static inline void
-intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
-                               int multiplier)
-{
-       mode->clock *= multiplier;
-       mode->private_flags |= multiplier;
-}
-
-static inline int
-intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
-{
-       return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
-}
-
 struct intel_framebuffer {
        struct drm_framebuffer base;
        struct drm_i915_gem_object *obj;
@@ -158,9 +128,12 @@ struct intel_encoder {
        bool cloneable;
        bool connectors_active;
        void (*hot_plug)(struct intel_encoder *);
+       bool (*compute_config)(struct intel_encoder *,
+                              struct intel_crtc_config *);
        void (*pre_pll_enable)(struct intel_encoder *);
        void (*pre_enable)(struct intel_encoder *);
        void (*enable)(struct intel_encoder *);
+       void (*mode_set)(struct intel_encoder *intel_encoder);
        void (*disable)(struct intel_encoder *);
        void (*post_disable)(struct intel_encoder *);
        /* Read out the current hw state of this connector, returning true if
@@ -168,6 +141,7 @@ struct intel_encoder {
         * it is connected to in the pipe parameter. */
        bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
        int crtc_mask;
+       enum hpd_pin hpd_pin;
 };
 
 struct intel_panel {
@@ -199,6 +173,51 @@ struct intel_connector {
        struct edid *edid;
 };
 
+struct intel_crtc_config {
+       struct drm_display_mode requested_mode;
+       struct drm_display_mode adjusted_mode;
+       /* This flag must be set by the encoder's compute_config callback if it
+        * changes the crtc timings in the mode to prevent the crtc fixup from
+        * overwriting them.  Currently only lvds needs that. */
+       bool timings_set;
+       /* Whether to set up the PCH/FDI. Note that we never allow sharing
+        * between pch encoders and cpu encoders. */
+       bool has_pch_encoder;
+
+       /*
+        * Use reduced/limited/broadcast rgb range, compressing from the full
+        * range fed into the crtcs.
+        */
+       bool limited_color_range;
+
+       /* DP has a bunch of special cases, unfortunately, so mark the pipe
+        * accordingly. */
+       bool has_dp_encoder;
+       bool dither;
+
+       /* Controls for the clock computation, to override various stages. */
+       bool clock_set;
+
+       /* Settings for the intel dpll used on pretty much everything but
+        * haswell. */
+       struct dpll {
+               unsigned n;
+               unsigned m1, m2;
+               unsigned p1, p2;
+       } dpll;
+
+       int pipe_bpp;
+       struct intel_link_m_n dp_m_n;
+       /**
+        * This is currently used by DP and HDMI encoders since those can have a
+        * target pixel clock != the port link clock (which is currently stored
+        * in adjusted_mode->clock).
+        */
+       int pixel_target_clock;
+       /* Used by SDVO (and if we ever fix it, HDMI). */
+       unsigned pixel_multiplier;
+};
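/*
 * Illustrative sketch (not part of the patch): with the new
 * ->compute_config() hook, an encoder validates the requested mode and
 * fills in whichever intel_crtc_config fields it cares about, returning
 * false if the configuration cannot work -- intel_dp_compute_config()
 * earlier in this diff is the real example. The function below and its
 * particular field choices are hypothetical, for illustration only.
 */
static bool example_encoder_compute_config(struct intel_encoder *encoder,
                                           struct intel_crtc_config *pipe_config)
{
        (void)encoder;  /* a real implementation would consult encoder state */

        /* flag pipe-level requirements this encoder imposes */
        pipe_config->has_dp_encoder = true;

        /* choose a pipe bpp the link can actually carry */
        pipe_config->pipe_bpp = 24;

        /* track the pixel clock separately from the port link clock */
        pipe_config->pixel_target_clock = pipe_config->requested_mode.clock;

        return true;
}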
+
 struct intel_crtc {
        struct drm_crtc base;
        enum pipe pipe;
@@ -230,7 +249,8 @@ struct intel_crtc {
        int16_t cursor_x, cursor_y;
        int16_t cursor_width, cursor_height;
        bool cursor_visible;
-       unsigned int bpp;
+
+       struct intel_crtc_config config;
 
        /* We can share PLLs across outputs if the timings match */
        struct intel_pch_pll *pch_pll;
@@ -242,11 +262,16 @@ struct intel_crtc {
 
 struct intel_plane {
        struct drm_plane base;
+       int plane;
        enum pipe pipe;
        struct drm_i915_gem_object *obj;
        bool can_scale;
        int max_downscale;
        u32 lut_r[1024], lut_g[1024], lut_b[1024];
+       int crtc_x, crtc_y;
+       unsigned int crtc_w, crtc_h;
+       uint32_t src_x, src_y;
+       uint32_t src_w, src_h;
        void (*update_plane)(struct drm_plane *plane,
                             struct drm_framebuffer *fb,
                             struct drm_i915_gem_object *obj,
@@ -347,7 +372,7 @@ struct dip_infoframe {
 } __attribute__((packed));
 
 struct intel_hdmi {
-       u32 sdvox_reg;
+       u32 hdmi_reg;
        int ddc_bus;
        uint32_t color_range;
        bool color_range_auto;
@@ -366,6 +391,7 @@ struct intel_hdmi {
 
 struct intel_dp {
        uint32_t output_reg;
+       uint32_t aux_ch_ctl_reg;
        uint32_t DP;
        uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
        bool has_audio;
@@ -443,13 +469,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev,
-                           int sdvox_reg, enum port port);
+                           int hdmi_reg, enum port port);
 extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                                      struct intel_connector *intel_connector);
 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
-                                 const struct drm_display_mode *mode,
-                                 struct drm_display_mode *adjusted_mode);
+extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+                                     struct intel_crtc_config *pipe_config);
 extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
                            bool is_sdvob);
@@ -464,18 +489,14 @@ extern void intel_dp_init(struct drm_device *dev, int output_reg,
                          enum port port);
 extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                                    struct intel_connector *intel_connector);
-void
-intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
-                struct drm_display_mode *adjusted_mode);
 extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
 extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
-extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
-                               const struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode);
+extern bool intel_dp_compute_config(struct intel_encoder *encoder,
+                                   struct intel_crtc_config *pipe_config);
 extern bool intel_dpd_is_edp(struct drm_device *dev);
 extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
 extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
@@ -483,11 +504,8 @@ extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
 extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
 extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
 extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
-extern int intel_edp_target_clock(struct intel_encoder *,
-                                 struct drm_display_mode *mode);
 extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
-extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
 extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
                                      enum plane plane);
 
@@ -531,6 +549,7 @@ extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
 extern void intel_connector_dpms(struct drm_connector *, int mode);
 extern bool intel_connector_get_hw_state(struct intel_connector *connector);
 extern void intel_modeset_check_state(struct drm_device *dev);
+extern void intel_plane_restore(struct drm_plane *plane);
 
 
 static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
@@ -636,6 +655,10 @@ extern void intel_init_clock_gating(struct drm_device *dev);
 extern void intel_write_eld(struct drm_encoder *encoder,
                            struct drm_display_mode *mode);
 extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+                                        struct intel_link_m_n *m_n);
+extern void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+                                        struct intel_link_m_n *m_n);
 extern void intel_prepare_ddi(struct drm_device *dev);
 extern void hsw_fdi_link_train(struct drm_crtc *crtc);
 extern void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -681,7 +704,7 @@ extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe);
 extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
 extern void intel_ddi_pll_init(struct drm_device *dev);
-extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
 extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
                                              enum transcoder cpu_transcoder);
 extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
@@ -695,4 +718,6 @@ extern bool
 intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
 extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 
+extern void intel_display_handle_reset(struct drm_device *dev);
+
 #endif /* __INTEL_DRV_H__ */
index 981bdce3634ebcff3a8c11c80cd44d1ba838a229..8d81c929b7b5a048c2727295e1da51e98459ed41 100644 (file)
@@ -150,7 +150,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
        }
        info->screen_size = size;
 
-//     memset(info->screen_base, 0, size);
+       /* This driver doesn't need a VT switch to restore the mode on resume */
+       info->skip_vt_switch = true;
 
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -227,7 +228,7 @@ int intel_fbdev_init(struct drm_device *dev)
        ifbdev->helper.funcs = &intel_fb_helper_funcs;
 
        ret = drm_fb_helper_init(dev, &ifbdev->helper,
-                                dev_priv->num_pipe,
+                                INTEL_INFO(dev)->num_pipes,
                                 INTELFB_CONN_LIMIT);
        if (ret) {
                kfree(ifbdev);
index fa8ec4a26041c65ccf48ebba4bc8bd9ec9b30946..ee4a8da8311eea38fcfec396b630b7e114a1849a 100644 (file)
@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 
        enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
 
-       WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
+       WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
             "HDMI port enabled, expecting disabled\n");
 }
 
@@ -120,13 +120,14 @@ static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
        }
 }
 
-static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
+static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
+                                 enum transcoder cpu_transcoder)
 {
        switch (frame->type) {
        case DIP_TYPE_AVI:
-               return HSW_TVIDEO_DIP_AVI_DATA(pipe);
+               return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
        case DIP_TYPE_SPD:
-               return HSW_TVIDEO_DIP_SPD_DATA(pipe);
+               return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
        default:
                DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
                return 0;
@@ -293,8 +294,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-       u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
-       u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
+       u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder);
+       u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->cpu_transcoder);
        unsigned int i, len = DIP_HEADER_SIZE + frame->len;
        u32 val = I915_READ(ctl_reg);
 
@@ -332,6 +333,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
                                         struct drm_display_mode *adjusted_mode)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct dip_infoframe avi_if = {
                .type = DIP_TYPE_AVI,
                .ver = DIP_VERSION_AVI,
@@ -342,7 +344,7 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
                avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
 
        if (intel_hdmi->rgb_quant_range_selectable) {
-               if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+               if (intel_crtc->config.limited_color_range)
                        avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
                else
                        avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
@@ -568,7 +570,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
        struct drm_i915_private *dev_priv = encoder->dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-       u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+       u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder);
        u32 val = I915_READ(reg);
 
        assert_hdmi_port_disabled(intel_hdmi);
@@ -597,40 +599,40 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-       u32 sdvox;
+       u32 hdmi_val;
 
-       sdvox = SDVO_ENCODING_HDMI;
-       if (!HAS_PCH_SPLIT(dev))
-               sdvox |= intel_hdmi->color_range;
+       hdmi_val = SDVO_ENCODING_HDMI;
+       if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
+               hdmi_val |= intel_hdmi->color_range;
        if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-               sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+               hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
        if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-               sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+               hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
 
-       if (intel_crtc->bpp > 24)
-               sdvox |= COLOR_FORMAT_12bpc;
+       if (intel_crtc->config.pipe_bpp > 24)
+               hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
        else
-               sdvox |= COLOR_FORMAT_8bpc;
+               hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
 
        /* Required on CPT */
        if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
-               sdvox |= HDMI_MODE_SELECT;
+               hdmi_val |= HDMI_MODE_SELECT_HDMI;
 
        if (intel_hdmi->has_audio) {
                DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
                                 pipe_name(intel_crtc->pipe));
-               sdvox |= SDVO_AUDIO_ENABLE;
-               sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
+               hdmi_val |= SDVO_AUDIO_ENABLE;
+               hdmi_val |= HDMI_MODE_SELECT_HDMI;
                intel_write_eld(encoder, adjusted_mode);
        }
 
        if (HAS_PCH_CPT(dev))
-               sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
-       else if (intel_crtc->pipe == PIPE_B)
-               sdvox |= SDVO_PIPE_B_SELECT;
+               hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+       else
+               hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
 
-       I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
-       POSTING_READ(intel_hdmi->sdvox_reg);
+       I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
+       POSTING_READ(intel_hdmi->hdmi_reg);
 
        intel_hdmi->set_infoframes(encoder, adjusted_mode);
 }
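
The mode_set hunk above assembles the HDMI port control word one flag at a time, and the tail of it picks the pipe select field: CPT-class PCHs carry the selection in their own transcoder field while older parts use the legacy pipe select bits. A minimal standalone sketch of that choice follows; the shift values and names are illustrative assumptions, not the driver's register definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the real values live in i915_reg.h. */
#define SKETCH_PIPE_SEL(pipe)      ((uint32_t)(pipe) << 30) /* pre-CPT select */
#define SKETCH_PIPE_SEL_CPT(pipe)  ((uint32_t)(pipe) << 29) /* CPT/PPT select */

static uint32_t pick_pipe_select(int has_pch_cpt, int pipe)
{
        /* Mirrors the if/else at the end of intel_hdmi_mode_set(): CPT PCHs
         * use their own transcoder-select field, everything else uses the
         * legacy pipe-select bits. */
        return has_pch_cpt ? SKETCH_PIPE_SEL_CPT(pipe) : SKETCH_PIPE_SEL(pipe);
}

int main(void)
{
        printf("CPT, pipe B : 0x%08x\n", pick_pipe_select(1, 1));
        printf("IBX, pipe B : 0x%08x\n", pick_pipe_select(0, 1));
        return 0;
}
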
@@ -643,7 +645,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        u32 tmp;
 
-       tmp = I915_READ(intel_hdmi->sdvox_reg);
+       tmp = I915_READ(intel_hdmi->hdmi_reg);
 
        if (!(tmp & SDVO_ENABLE))
                return false;
@@ -660,6 +662,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        u32 temp;
        u32 enable_bits = SDVO_ENABLE;
@@ -667,38 +670,32 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
        if (intel_hdmi->has_audio)
                enable_bits |= SDVO_AUDIO_ENABLE;
 
-       temp = I915_READ(intel_hdmi->sdvox_reg);
+       temp = I915_READ(intel_hdmi->hdmi_reg);
 
        /* HW workaround for IBX, we need to move the port to transcoder A
-        * before disabling it. */
-       if (HAS_PCH_IBX(dev)) {
-               struct drm_crtc *crtc = encoder->base.crtc;
-               int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
-
-               /* Restore the transcoder select bit. */
-               if (pipe == PIPE_B)
-                       enable_bits |= SDVO_PIPE_B_SELECT;
-       }
+        * before disabling it, so restore the transcoder select bit here. */
+       if (HAS_PCH_IBX(dev))
+               enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
 
        /* HW workaround, need to toggle enable bit off and on for 12bpc, but
         * we do this anyway, which has proven more stable in testing.
         */
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
-               POSTING_READ(intel_hdmi->sdvox_reg);
+               I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+               POSTING_READ(intel_hdmi->hdmi_reg);
        }
 
        temp |= enable_bits;
 
-       I915_WRITE(intel_hdmi->sdvox_reg, temp);
-       POSTING_READ(intel_hdmi->sdvox_reg);
+       I915_WRITE(intel_hdmi->hdmi_reg, temp);
+       POSTING_READ(intel_hdmi->hdmi_reg);
 
        /* HW workaround, need to write this twice for issue that may result
         * in first write getting masked.
         */
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(intel_hdmi->sdvox_reg, temp);
-               POSTING_READ(intel_hdmi->sdvox_reg);
+               I915_WRITE(intel_hdmi->hdmi_reg, temp);
+               POSTING_READ(intel_hdmi->hdmi_reg);
        }
 }
 
@@ -710,7 +707,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
        u32 temp;
        u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
 
-       temp = I915_READ(intel_hdmi->sdvox_reg);
+       temp = I915_READ(intel_hdmi->hdmi_reg);
 
        /* HW workaround for IBX, we need to move the port to transcoder A
         * before disabling it. */
@@ -720,12 +717,12 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
 
                if (temp & SDVO_PIPE_B_SELECT) {
                        temp &= ~SDVO_PIPE_B_SELECT;
-                       I915_WRITE(intel_hdmi->sdvox_reg, temp);
-                       POSTING_READ(intel_hdmi->sdvox_reg);
+                       I915_WRITE(intel_hdmi->hdmi_reg, temp);
+                       POSTING_READ(intel_hdmi->hdmi_reg);
 
                        /* Again we need to write this twice. */
-                       I915_WRITE(intel_hdmi->sdvox_reg, temp);
-                       POSTING_READ(intel_hdmi->sdvox_reg);
+                       I915_WRITE(intel_hdmi->hdmi_reg, temp);
+                       POSTING_READ(intel_hdmi->hdmi_reg);
 
                        /* Transcoder selection bits only update
                         * effectively on vblank. */
@@ -740,21 +737,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
         * we do this anyway, which has proven more stable in testing.
         */
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
-               POSTING_READ(intel_hdmi->sdvox_reg);
+               I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+               POSTING_READ(intel_hdmi->hdmi_reg);
        }
 
        temp &= ~enable_bits;
 
-       I915_WRITE(intel_hdmi->sdvox_reg, temp);
-       POSTING_READ(intel_hdmi->sdvox_reg);
+       I915_WRITE(intel_hdmi->hdmi_reg, temp);
+       POSTING_READ(intel_hdmi->hdmi_reg);
 
        /* HW workaround, need to write this twice for issue that may result
         * in first write getting masked.
         */
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(intel_hdmi->sdvox_reg, temp);
-               POSTING_READ(intel_hdmi->sdvox_reg);
+               I915_WRITE(intel_hdmi->hdmi_reg, temp);
+               POSTING_READ(intel_hdmi->hdmi_reg);
        }
 }
 
@@ -772,23 +769,40 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
-                          const struct drm_display_mode *mode,
-                          struct drm_display_mode *adjusted_mode)
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+                              struct intel_crtc_config *pipe_config)
 {
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
        if (intel_hdmi->color_range_auto) {
                /* See CEA-861-E - 5.1 Default Encoding Parameters */
                if (intel_hdmi->has_hdmi_sink &&
                    drm_match_cea_mode(adjusted_mode) > 1)
-                       intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
+                       intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
                else
                        intel_hdmi->color_range = 0;
        }
 
        if (intel_hdmi->color_range)
-               adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+               pipe_config->limited_color_range = true;
+
+       if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+               pipe_config->has_pch_encoder = true;
+
+       /*
+        * HDMI is either 12 or 8, so if the display lets 10bpc sneak
+        * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
+        * outputs.
+        */
+       if (pipe_config->pipe_bpp > 8*3 && HAS_PCH_SPLIT(dev)) {
+               DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
+               pipe_config->pipe_bpp = 12*3;
+       } else {
+               DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
+               pipe_config->pipe_bpp = 8*3;
+       }
 
        return true;
 }
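
intel_hdmi_compute_config() above clamps the pre-computed pipe bpp to the two depths HDMI can carry, 8 or 12 bpc. A small standalone sketch of the same clamp, using the diff's 8*3 / 12*3 convention for pipe_bpp (bits summed over the three channels); the function name and the supports_12bpc flag are illustrative.

#include <stdio.h>

/* pipe_bpp counts all three channels, so 8 bpc == 24 and 12 bpc == 36. */
static int clamp_hdmi_pipe_bpp(int requested_bpp, int supports_12bpc)
{
        if (requested_bpp > 8 * 3 && supports_12bpc)
                return 12 * 3;   /* e.g. a 10 bpc request is bumped up to 12 bpc */
        return 8 * 3;            /* everything else falls back to 8 bpc */
}

int main(void)
{
        printf("%d\n", clamp_hdmi_pipe_bpp(30, 1)); /* 36 */
        printf("%d\n", clamp_hdmi_pipe_bpp(30, 0)); /* 24 */
        printf("%d\n", clamp_hdmi_pipe_bpp(24, 1)); /* 24 */
        return 0;
}
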
@@ -916,7 +930,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
                        break;
                case INTEL_BROADCAST_RGB_LIMITED:
                        intel_hdmi->color_range_auto = false;
-                       intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
+                       intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
                        break;
                default:
                        return -EINVAL;
@@ -941,7 +955,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
-       .mode_fixup = intel_hdmi_mode_fixup,
        .mode_set = intel_hdmi_mode_set,
 };
 
@@ -992,29 +1005,30 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        switch (port) {
        case PORT_B:
                intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-               dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
+               intel_encoder->hpd_pin = HPD_PORT_B;
                break;
        case PORT_C:
                intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-               dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
+               intel_encoder->hpd_pin = HPD_PORT_C;
                break;
        case PORT_D:
                intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
-               dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
+               intel_encoder->hpd_pin = HPD_PORT_D;
                break;
        case PORT_A:
+               intel_encoder->hpd_pin = HPD_PORT_A;
                /* Internal port only for eDP. */
        default:
                BUG();
        }
 
-       if (!HAS_PCH_SPLIT(dev)) {
-               intel_hdmi->write_infoframe = g4x_write_infoframe;
-               intel_hdmi->set_infoframes = g4x_set_infoframes;
-       } else if (IS_VALLEYVIEW(dev)) {
+       if (IS_VALLEYVIEW(dev)) {
                intel_hdmi->write_infoframe = vlv_write_infoframe;
                intel_hdmi->set_infoframes = vlv_set_infoframes;
-       } else if (IS_HASWELL(dev)) {
+       } else if (!HAS_PCH_SPLIT(dev)) {
+               intel_hdmi->write_infoframe = g4x_write_infoframe;
+               intel_hdmi->set_infoframes = g4x_set_infoframes;
+       } else if (HAS_DDI(dev)) {
                intel_hdmi->write_infoframe = hsw_write_infoframe;
                intel_hdmi->set_infoframes = hsw_set_infoframes;
        } else if (HAS_PCH_IBX(dev)) {
@@ -1045,7 +1059,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        }
 }
 
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
 {
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
@@ -1069,6 +1083,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
                         DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
 
+       intel_encoder->compute_config = intel_hdmi_compute_config;
        intel_encoder->enable = intel_enable_hdmi;
        intel_encoder->disable = intel_disable_hdmi;
        intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
@@ -1078,7 +1093,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
        intel_encoder->cloneable = false;
 
        intel_dig_port->port = port;
-       intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+       intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
        intel_dig_port->dp.output_reg = 0;
 
        intel_hdmi_init_connector(intel_dig_port, intel_connector);
index 3d1d97488cc95527c699dd6eb8b0c2ec78cc6adf..ca2d903c19bb0340a1bebdb6275b14cf46df0460 100644 (file)
@@ -261,8 +261,6 @@ centre_horizontally(struct drm_display_mode *mode,
 
        mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
        mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
-
-       mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
 }
 
 static void
@@ -284,8 +282,6 @@ centre_vertically(struct drm_display_mode *mode,
 
        mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
        mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
-
-       mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
 }
 
 static inline u32 panel_fitter_scaling(u32 source, u32 target)
@@ -301,17 +297,20 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
        return (FACTOR * ratio + FACTOR/2) / FACTOR;
 }
 
-static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
-                                 const struct drm_display_mode *mode,
-                                 struct drm_display_mode *adjusted_mode)
+static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+                                     struct intel_crtc_config *pipe_config)
 {
-       struct drm_device *dev = encoder->dev;
+       struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+       struct intel_lvds_encoder *lvds_encoder =
+               to_lvds_encoder(&intel_encoder->base);
        struct intel_connector *intel_connector =
                &lvds_encoder->attached_connector->base;
+       struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+       struct drm_display_mode *mode = &pipe_config->requested_mode;
        struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
        u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+       unsigned int lvds_bpp;
        int pipe;
 
        /* Should never happen!! */
@@ -323,6 +322,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
        if (intel_encoder_check_is_cloned(&lvds_encoder->base))
                return false;
 
+       if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
+           LVDS_A3_POWER_UP)
+               lvds_bpp = 8*3;
+       else
+               lvds_bpp = 6*3;
+
+       if (lvds_bpp != pipe_config->pipe_bpp) {
+               DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
+                             pipe_config->pipe_bpp, lvds_bpp);
+               pipe_config->pipe_bpp = lvds_bpp;
+       }
        /*
         * We have timings from the BIOS for the panel, put them in
         * to the adjusted mode.  The CRTC will be set up for this mode,
@@ -333,6 +343,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
                               adjusted_mode);
 
        if (HAS_PCH_SPLIT(dev)) {
+               pipe_config->has_pch_encoder = true;
+
                intel_pch_panel_fitting(dev,
                                        intel_connector->panel.fitting_mode,
                                        mode, adjusted_mode);
@@ -359,6 +371,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
                I915_WRITE(BCLRPAT(pipe), 0);
 
        drm_mode_set_crtcinfo(adjusted_mode, 0);
+       pipe_config->timings_set = true;
 
        switch (intel_connector->panel.fitting_mode) {
        case DRM_MODE_SCALE_CENTER:
@@ -661,7 +674,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 }
 
 static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
-       .mode_fixup = intel_lvds_mode_fixup,
        .mode_set = intel_lvds_mode_set,
 };
 
@@ -850,6 +862,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Fujitsu Esprimo Q900",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+               },
+       },
 
        { }     /* terminating entry */
 };
@@ -1019,12 +1039,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
 {
        /* With the introduction of the PCH we gained a dedicated
         * LVDS presence pin, use it. */
-       if (HAS_PCH_SPLIT(dev))
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
                return true;
 
        /* Otherwise LVDS was only attached to mobile products,
         * except for the inglorious 830gm */
-       return IS_MOBILE(dev) && !IS_I830(dev);
+       if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+               return true;
+
+       return false;
 }
 
 /**
@@ -1102,6 +1125,7 @@ bool intel_lvds_init(struct drm_device *dev)
        intel_encoder->enable = intel_enable_lvds;
        intel_encoder->pre_enable = intel_pre_enable_lvds;
        intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+       intel_encoder->compute_config = intel_lvds_compute_config;
        intel_encoder->disable = intel_disable_lvds;
        intel_encoder->get_hw_state = intel_lvds_get_hw_state;
        intel_connector->get_hw_state = intel_connector_get_hw_state;
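
The LVDS compute_config hunk reads the port's A3 power field to decide whether the attached panel is an 18 bpp or 24 bpp part and then forces pipe_bpp to match. A hedged sketch of that decision in isolation; the mask and encoding below are placeholders standing in for LVDS_A3_POWER_MASK and LVDS_A3_POWER_UP.

#include <stdint.h>
#include <stdio.h>

/* Placeholder encodings, stand-ins for the driver's A3 power definitions. */
#define SKETCH_A3_POWER_MASK  (3u << 6)
#define SKETCH_A3_POWER_UP    (3u << 6)

static unsigned int lvds_bpp_from_port(uint32_t lvds_reg_val)
{
        /* Panels driven with the A3 pair powered up are 8 bpc (24 bpp),
         * otherwise the panel is treated as a 6 bpc (18 bpp) part. */
        if ((lvds_reg_val & SKETCH_A3_POWER_MASK) == SKETCH_A3_POWER_UP)
                return 8 * 3;
        return 6 * 3;
}

int main(void)
{
        printf("%u\n", lvds_bpp_from_port(SKETCH_A3_POWER_UP)); /* 24 */
        printf("%u\n", lvds_bpp_from_port(0));                  /* 18 */
        return 0;
}
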
index bee8cb6108a7d44883286ef4b35bb6d7f8713a0f..7874cecc286355311c55c6a2b49dad7216015cbf 100644 (file)
@@ -286,8 +286,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       dev_priv->backlight_level = level;
-       if (dev_priv->backlight_enabled)
+       dev_priv->backlight.level = level;
+       if (dev_priv->backlight.device)
+               dev_priv->backlight.device->props.brightness = level;
+
+       if (dev_priv->backlight.enabled)
                intel_panel_actually_set_backlight(dev, level);
 }
 
@@ -295,7 +298,7 @@ void intel_panel_disable_backlight(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       dev_priv->backlight_enabled = false;
+       dev_priv->backlight.enabled = false;
        intel_panel_actually_set_backlight(dev, 0);
 
        if (INTEL_INFO(dev)->gen >= 4) {
@@ -318,8 +321,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->backlight_level == 0)
-               dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+       if (dev_priv->backlight.level == 0) {
+               dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
+               if (dev_priv->backlight.device)
+                       dev_priv->backlight.device->props.brightness =
+                               dev_priv->backlight.level;
+       }
 
        if (INTEL_INFO(dev)->gen >= 4) {
                uint32_t reg, tmp;
@@ -335,7 +342,7 @@ void intel_panel_enable_backlight(struct drm_device *dev,
                if (tmp & BLM_PWM_ENABLE)
                        goto set_level;
 
-               if (dev_priv->num_pipe == 3)
+               if (INTEL_INFO(dev)->num_pipes == 3)
                        tmp &= ~BLM_PIPE_SELECT_IVB;
                else
                        tmp &= ~BLM_PIPE_SELECT;
@@ -360,16 +367,16 @@ set_level:
         * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
         * registers are set.
         */
-       dev_priv->backlight_enabled = true;
-       intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+       dev_priv->backlight.enabled = true;
+       intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
 }
 
 static void intel_panel_init_backlight(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       dev_priv->backlight_level = intel_panel_get_backlight(dev);
-       dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
+       dev_priv->backlight.level = intel_panel_get_backlight(dev);
+       dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
 }
 
 enum drm_connector_status
@@ -405,8 +412,7 @@ static int intel_panel_update_status(struct backlight_device *bd)
 static int intel_panel_get_brightness(struct backlight_device *bd)
 {
        struct drm_device *dev = bl_get_data(bd);
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       return dev_priv->backlight_level;
+       return intel_panel_get_backlight(dev);
 }
 
 static const struct backlight_ops intel_panel_bl_ops = {
@@ -424,31 +430,31 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
 
        memset(&props, 0, sizeof(props));
        props.type = BACKLIGHT_RAW;
+       props.brightness = dev_priv->backlight.level;
        props.max_brightness = _intel_panel_get_max_backlight(dev);
        if (props.max_brightness == 0) {
                DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
                return -ENODEV;
        }
-       dev_priv->backlight =
+       dev_priv->backlight.device =
                backlight_device_register("intel_backlight",
                                          &connector->kdev, dev,
                                          &intel_panel_bl_ops, &props);
 
-       if (IS_ERR(dev_priv->backlight)) {
+       if (IS_ERR(dev_priv->backlight.device)) {
                DRM_ERROR("Failed to register backlight: %ld\n",
-                         PTR_ERR(dev_priv->backlight));
-               dev_priv->backlight = NULL;
+                         PTR_ERR(dev_priv->backlight.device));
+               dev_priv->backlight.device = NULL;
                return -ENODEV;
        }
-       dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
        return 0;
 }
 
 void intel_panel_destroy_backlight(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       if (dev_priv->backlight)
-               backlight_device_unregister(dev_priv->backlight);
+       if (dev_priv->backlight.device)
+               backlight_device_unregister(dev_priv->backlight.device);
 }
 #else
 int intel_panel_setup_backlight(struct drm_connector *connector)
index adca00783e61b304723d3d05197b53f47b6b33db..13a0666a53b42e9645179a30a3f3a2a69df09809 100644 (file)
@@ -2460,10 +2460,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
        if (val == dev_priv->rps.cur_delay)
                return;
 
-       I915_WRITE(GEN6_RPNSWREQ,
-                  GEN6_FREQUENCY(val) |
-                  GEN6_OFFSET(0) |
-                  GEN6_AGGRESSIVE_TURBO);
+       if (IS_HASWELL(dev))
+               I915_WRITE(GEN6_RPNSWREQ,
+                          HSW_FREQUENCY(val));
+       else
+               I915_WRITE(GEN6_RPNSWREQ,
+                          GEN6_FREQUENCY(val) |
+                          GEN6_OFFSET(0) |
+                          GEN6_AGGRESSIVE_TURBO);
 
        /* Make sure we continue to get interrupts
         * until we hit the minimum or maximum frequencies.
@@ -2601,12 +2605,19 @@ static void gen6_enable_rps(struct drm_device *dev)
                   GEN6_RC_CTL_EI_MODE(1) |
                   GEN6_RC_CTL_HW_ENABLE);
 
-       I915_WRITE(GEN6_RPNSWREQ,
-                  GEN6_FREQUENCY(10) |
-                  GEN6_OFFSET(0) |
-                  GEN6_AGGRESSIVE_TURBO);
-       I915_WRITE(GEN6_RC_VIDEO_FREQ,
-                  GEN6_FREQUENCY(12));
+       if (IS_HASWELL(dev)) {
+               I915_WRITE(GEN6_RPNSWREQ,
+                          HSW_FREQUENCY(10));
+               I915_WRITE(GEN6_RC_VIDEO_FREQ,
+                          HSW_FREQUENCY(12));
+       } else {
+               I915_WRITE(GEN6_RPNSWREQ,
+                          GEN6_FREQUENCY(10) |
+                          GEN6_OFFSET(0) |
+                          GEN6_AGGRESSIVE_TURBO);
+               I915_WRITE(GEN6_RC_VIDEO_FREQ,
+                          GEN6_FREQUENCY(12));
+       }
 
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
@@ -2628,12 +2639,14 @@ static void gen6_enable_rps(struct drm_device *dev)
                   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
        ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
-       if (!ret) {
+       if (!ret && (IS_GEN6(dev) || IS_IVYBRIDGE(dev))) {
                pcu_mbox = 0;
                ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
-               if (ret && pcu_mbox & (1<<31)) { /* OC supported */
+               if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+                       DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max from %dMHz to %dMHz\n",
+                                        (dev_priv->rps.max_delay & 0xff) * 50,
+                                        (pcu_mbox & 0xff) * 50);
                        dev_priv->rps.max_delay = pcu_mbox & 0xff;
-                       DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
                }
        } else {
                DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
@@ -2821,7 +2834,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
        ret = intel_ring_idle(ring);
        dev_priv->mm.interruptible = was_interruptible;
        if (ret) {
-               DRM_ERROR("failed to enable ironlake power power savings\n");
+               DRM_ERROR("failed to enable ironlake power savings\n");
                ironlake_teardown_rc6(dev);
                return;
        }
@@ -3768,6 +3781,9 @@ static void haswell_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
                   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
+       /* WaSwitchSolVfFArbitrationPriority */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
        /* XXX: This is a workaround for early silicon revisions and should be
         * removed later.
         */
@@ -3899,8 +3915,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
+       /* WaDisablePSDDualDispatchEnable */
        I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
-                  _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+                  _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
+                                     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
        /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
        I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
@@ -3967,25 +3985,21 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
-       /*
-        * On ValleyView, the GUnit needs to signal the GT
-        * when flip and other events complete.  So enable
-        * all the GUnit->GT interrupts here
-        */
-       I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
-                  PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
-                  SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
-                  PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
-                  PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
-                  SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
-                  PLANEA_FLIPDONE_INT_EN);
-
        /*
         * WaDisableVLVClockGating_VBIIssue
         * Disable clock gating on the GCFG unit to prevent a delay
         * in the reporting of vblank events.
         */
-       I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+       I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
+
+       /* Conservative clock gating settings for now */
+       I915_WRITE(0x9400, 0xffffffff);
+       I915_WRITE(0x9404, 0xffffffff);
+       I915_WRITE(0x9408, 0xffffffff);
+       I915_WRITE(0x940c, 0xffffffff);
+       I915_WRITE(0x9410, 0xffffffff);
+       I915_WRITE(0x9414, 0xffffffff);
+       I915_WRITE(0x9418, 0xffffffff);
 }
 
 static void g4x_init_clock_gating(struct drm_device *dev)
@@ -4076,7 +4090,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
        bool is_enabled, enable_requested;
        uint32_t tmp;
 
-       if (!IS_HASWELL(dev))
+       if (!HAS_POWER_WELL(dev))
                return;
 
        if (!i915_disable_power_well && !enable)
@@ -4114,7 +4128,7 @@ void intel_init_power_well(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!IS_HASWELL(dev))
+       if (!HAS_POWER_WELL(dev))
                return;
 
        /* For now, we need the power well to be always enabled. */
@@ -4274,21 +4288,14 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
 
 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
-       u32 forcewake_ack;
-
-       if (IS_HASWELL(dev_priv->dev))
-               forcewake_ack = FORCEWAKE_ACK_HSW;
-       else
-               forcewake_ack = FORCEWAKE_ACK;
-
-       if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-       I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
+       I915_WRITE_NOTRACE(FORCEWAKE, 1);
        POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
-       if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
@@ -4311,7 +4318,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
        else
                forcewake_ack = FORCEWAKE_MT_ACK;
 
-       if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+       if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
@@ -4319,7 +4326,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
        /* something from same cacheline, but !FORCEWAKE_MT */
        POSTING_READ(ECOBUS);
 
-       if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+       if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
@@ -4409,15 +4416,22 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 
 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 {
-       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
+       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
        I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+       I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+                          _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
-       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
+       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+               DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+
+       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
+                            FORCEWAKE_KERNEL),
+                           FORCEWAKE_ACK_TIMEOUT_MS))
+               DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
 
        __gen6_gt_wait_for_thread_c0(dev_priv);
 }
@@ -4425,8 +4439,9 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-       /* something from same cacheline, but !FORCEWAKE_VLV */
-       POSTING_READ(FORCEWAKE_ACK_VLV);
+       I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+                          _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+       /* The below doubles as a POSTING_READ */
        gen6_gt_check_fifodbg(dev_priv);
 }
 
@@ -4511,3 +4526,56 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
 
        return 0;
 }
+
+static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode,
+                       u8 addr, u32 *val)
+{
+       u32 cmd, devfn, port, be, bar;
+
+       bar = 0;
+       be = 0xf;
+       port = IOSF_PORT_PUNIT;
+       devfn = PCI_DEVFN(2, 0);
+
+       cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
+               (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
+               (bar << IOSF_BAR_SHIFT);
+
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+       if (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) {
+               DRM_DEBUG_DRIVER("warning: pcode (%s) mailbox access failed\n",
+                                opcode == PUNIT_OPCODE_REG_READ ?
+                                "read" : "write");
+               return -EAGAIN;
+       }
+
+       I915_WRITE(VLV_IOSF_ADDR, addr);
+       if (opcode == PUNIT_OPCODE_REG_WRITE)
+               I915_WRITE(VLV_IOSF_DATA, *val);
+       I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+
+       if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0,
+                    500)) {
+               DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n",
+                         opcode == PUNIT_OPCODE_REG_READ ? "read" : "write",
+                         addr);
+               return -ETIMEDOUT;
+       }
+
+       if (opcode == PUNIT_OPCODE_REG_READ)
+               *val = I915_READ(VLV_IOSF_DATA);
+       I915_WRITE(VLV_IOSF_DATA, 0);
+
+       return 0;
+}
+
+int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
+{
+       return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_READ, addr, val);
+}
+
+int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+{
+       return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_WRITE, addr, &val);
+}
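
vlv_punit_rw() above drives a P-unit access over the IOSF sideband: pack a command word from the device/function, opcode, destination port, byte enables and BAR, write the address (and data for a write), ring the doorbell register, then poll the busy bit until the transaction completes. The sketch below shows only the command-word packing; the field offsets are illustrative assumptions, the real shifts live in i915_reg.h.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field offsets for the doorbell command word. */
#define SKETCH_DEVFN_SHIFT         24
#define SKETCH_OPCODE_SHIFT        16
#define SKETCH_PORT_SHIFT           8
#define SKETCH_BYTE_ENABLES_SHIFT   4
#define SKETCH_BAR_SHIFT            1

static uint32_t pack_iosf_cmd(uint8_t devfn, uint8_t opcode, uint8_t port,
                              uint8_t byte_enables, uint8_t bar)
{
        /* Same composition as the cmd assignment in vlv_punit_rw(). */
        return ((uint32_t)devfn        << SKETCH_DEVFN_SHIFT) |
               ((uint32_t)opcode       << SKETCH_OPCODE_SHIFT) |
               ((uint32_t)port         << SKETCH_PORT_SHIFT) |
               ((uint32_t)byte_enables << SKETCH_BYTE_ENABLES_SHIFT) |
               ((uint32_t)bar          << SKETCH_BAR_SHIFT);
}

int main(void)
{
        /* Example values: device 2 function 0, a read-style opcode,
         * an assumed punit port number, and all byte enables set. */
        printf("cmd = 0x%08x\n", pack_iosf_cmd(0x10, 0x06, 0x04, 0xf, 0));
        return 0;
}
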
index d07a8cdf998ec4b1eadda9aa9906a8053f25b1ee..298dc85ec32c876dc559356976d3e5479c5b786e 100644 (file)
@@ -246,11 +246,11 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
                return;
        }
 
-       if (intel_sdvo->sdvo_reg == SDVOB) {
-               cval = I915_READ(SDVOC);
-       } else {
-               bval = I915_READ(SDVOB);
-       }
+       if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
+               cval = I915_READ(GEN3_SDVOC);
+       else
+               bval = I915_READ(GEN3_SDVOB);
+
        /*
         * Write the registers twice for luck. Sometimes,
         * writing them only once doesn't appear to 'stick'.
@@ -258,10 +258,10 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
         */
        for (i = 0; i < 2; i++)
        {
-               I915_WRITE(SDVOB, bval);
-               I915_READ(SDVOB);
-               I915_WRITE(SDVOC, cval);
-               I915_READ(SDVOC);
+               I915_WRITE(GEN3_SDVOB, bval);
+               I915_READ(GEN3_SDVOB);
+               I915_WRITE(GEN3_SDVOC, cval);
+               I915_READ(GEN3_SDVOC);
        }
 }
 
@@ -451,7 +451,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
        int i, ret = true;
 
         /* Would be simpler to allocate both in one go ? */        
-       buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
+       buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
        if (!buf)
                return false;
 
@@ -788,7 +788,6 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
        v_sync_offset = mode->vsync_start - mode->vdisplay;
 
        mode_clock = mode->clock;
-       mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
        mode_clock /= 10;
        dtd->part1.clock = mode_clock;
 
@@ -957,14 +956,17 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
                .len = DIP_LEN_AVI,
        };
        uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+       struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
 
        if (intel_sdvo->rgb_quant_range_selectable) {
-               if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
+               if (intel_crtc->config.limited_color_range)
                        avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
                else
                        avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
        }
 
+       avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
+
        intel_dip_infoframe_csum(&avi_if);
 
        /* sdvo spec says that the ecc is handled by the hw, and it looks like
@@ -1039,12 +1041,18 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
        return true;
 }
 
-static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
-                                 const struct drm_display_mode *mode,
-                                 struct drm_display_mode *adjusted_mode)
+static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
+                                     struct intel_crtc_config *pipe_config)
 {
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
-       int multiplier;
+       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+       struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+       struct drm_display_mode *mode = &pipe_config->requested_mode;
+
+       DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
+       pipe_config->pipe_bpp = 8*3;
+
+       if (HAS_PCH_SPLIT(encoder->base.dev))
+               pipe_config->has_pch_encoder = true;
 
        /* We need to construct preferred input timings based on our
         * output timings.  To do that, we have to set the output
@@ -1071,37 +1079,40 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
        /* Make the CRTC code factor in the SDVO pixel multiplier.  The
         * SDVO device will factor out the multiplier during mode_set.
         */
-       multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
-       intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+       pipe_config->pixel_multiplier =
+               intel_sdvo_get_pixel_multiplier(adjusted_mode);
+       adjusted_mode->clock *= pipe_config->pixel_multiplier;
 
        if (intel_sdvo->color_range_auto) {
                /* See CEA-861-E - 5.1 Default Encoding Parameters */
+               /* FIXME: This bit is only valid when using TMDS encoding and 8
+                * bit per color mode. */
                if (intel_sdvo->has_hdmi_monitor &&
                    drm_match_cea_mode(adjusted_mode) > 1)
-                       intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
+                       intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
                else
                        intel_sdvo->color_range = 0;
        }
 
        if (intel_sdvo->color_range)
-               adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
+               pipe_config->limited_color_range = true;
 
        return true;
 }
 
-static void intel_sdvo_mode_set(struct drm_encoder *encoder,
-                               struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode)
+static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
 {
-       struct drm_device *dev = encoder->dev;
+       struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = encoder->crtc;
+       struct drm_crtc *crtc = intel_encoder->base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+       struct drm_display_mode *adjusted_mode =
+               &intel_crtc->config.adjusted_mode;
+       struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
        u32 sdvox;
        struct intel_sdvo_in_out_map in_out;
        struct intel_sdvo_dtd input_dtd, output_dtd;
-       int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
        int rate;
 
        if (!mode)
@@ -1161,7 +1172,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                DRM_INFO("Setting input timings on %s failed\n",
                         SDVO_NAME(intel_sdvo));
 
-       switch (pixel_multiplier) {
+       switch (intel_crtc->config.pixel_multiplier) {
        default:
        case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
        case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
@@ -1182,10 +1193,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        } else {
                sdvox = I915_READ(intel_sdvo->sdvo_reg);
                switch (intel_sdvo->sdvo_reg) {
-               case SDVOB:
+               case GEN3_SDVOB:
                        sdvox &= SDVOB_PRESERVE_MASK;
                        break;
-               case SDVOC:
+               case GEN3_SDVOC:
                        sdvox &= SDVOC_PRESERVE_MASK;
                        break;
                }
@@ -1193,9 +1204,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        }
 
        if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
-               sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+               sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
        else
-               sdvox |= TRANSCODER(intel_crtc->pipe);
+               sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
 
        if (intel_sdvo->has_hdmi_audio)
                sdvox |= SDVO_AUDIO_ENABLE;
@@ -1205,7 +1216,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
                /* done in crtc_mode_set as it lives inside the dpll register */
        } else {
-               sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+               sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+                       << SDVO_PORT_MULTIPLY_SHIFT;
        }
 
        if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
@@ -1219,8 +1231,12 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
        struct intel_sdvo_connector *intel_sdvo_connector =
                to_intel_sdvo_connector(&connector->base);
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+       struct drm_i915_private *dev_priv = intel_sdvo->base.base.dev->dev_private;
        u16 active_outputs;
 
+       if (!(I915_READ(intel_sdvo->sdvo_reg) & SDVO_ENABLE))
+               return false;
+
        intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
 
        if (active_outputs & intel_sdvo_connector->output_flag)
@@ -1305,15 +1321,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
        temp = I915_READ(intel_sdvo->sdvo_reg);
        if ((temp & SDVO_ENABLE) == 0) {
                /* HW workaround for IBX, we need to move the port
-                * to transcoder A before disabling it. */
-               if (HAS_PCH_IBX(dev)) {
-                       struct drm_crtc *crtc = encoder->base.crtc;
-                       int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
-
-                       /* Restore the transcoder select bit. */
-                       if (pipe == PIPE_B)
-                               temp |= SDVO_PIPE_B_SELECT;
-               }
+                * to transcoder A before disabling it, so restore it here. */
+               if (HAS_PCH_IBX(dev))
+                       temp |= SDVO_PIPE_SEL(intel_crtc->pipe);
 
                intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
        }
@@ -1932,7 +1942,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
                        break;
                case INTEL_BROADCAST_RGB_LIMITED:
                        intel_sdvo->color_range_auto = false;
-                       intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
+                       /* FIXME: this bit is only valid when using TMDS
+                        * encoding and 8 bit per color mode. */
+                       intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
                        break;
                default:
                        return -EINVAL;
@@ -2040,11 +2052,6 @@ done:
 #undef CHECK_PROPERTY
 }
 
-static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
-       .mode_fixup = intel_sdvo_mode_fixup,
-       .mode_set = intel_sdvo_mode_set,
-};
-
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
        .dpms = intel_sdvo_dpms,
        .detect = intel_sdvo_detect,
@@ -2779,9 +2786,15 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
                        SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
        }
 
-       drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+       /* Only enable the hotplug irq if we need it, to work around noisy
+        * hotplug lines.
+        */
+       if (intel_sdvo->hotplug_active)
+               intel_encoder->hpd_pin = intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
 
+       intel_encoder->compute_config = intel_sdvo_compute_config;
        intel_encoder->disable = intel_disable_sdvo;
+       intel_encoder->mode_set = intel_sdvo_mode_set;
        intel_encoder->enable = intel_enable_sdvo;
        intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
 
@@ -2807,12 +2820,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
         */
        intel_sdvo->base.cloneable = false;
 
-       /* Only enable the hotplug irq if we need it, to work around noisy
-        * hotplug lines.
-        */
-       if (intel_sdvo->hotplug_active)
-               dev_priv->hotplug_supported_mask |= hotplug_mask;
-
        intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
        /* Set the input timing to the screen. Assume always input 0. */
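
The SDVO conversion stores the pixel multiplier in pipe_config and scales adjusted_mode->clock by it before the mode-set code maps the multiplier onto a clock rate command (the 1x/2x/4x switch above). A hedged sketch of that flow; the 100 MHz and 50 MHz thresholds are an assumption about how the multiplier is chosen, only the multiplier-to-rate mapping is taken from the diff.

#include <stdio.h>

enum sketch_rate { RATE_MULT_1X = 1, RATE_MULT_2X = 2, RATE_MULT_4X = 4 };

/* Assumed selection rule: keep the multiplied clock at or above ~100 MHz. */
static int pick_pixel_multiplier(int mode_clock_khz)
{
        if (mode_clock_khz >= 100000)
                return 1;
        if (mode_clock_khz >= 50000)
                return 2;
        return 4;
}

static enum sketch_rate rate_for_multiplier(int multiplier)
{
        switch (multiplier) {
        case 2:  return RATE_MULT_2X;
        case 4:  return RATE_MULT_4X;
        default: return RATE_MULT_1X;   /* 1x, as in the driver's switch */
        }
}

int main(void)
{
        int mode_clock = 40000;                      /* 40 MHz dotclock */
        int mult = pick_pixel_multiplier(mode_clock);
        int adjusted_clock = mode_clock * mult;      /* what the CRTC is programmed for */

        printf("mult=%dx rate=%d adjusted=%d kHz\n",
               mult, rate_for_multiplier(mult), adjusted_clock);
        return 0;
}
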
index 1b6eb76beb7c1dcebf2f7f5df9596fff18a97ada..c7d25c5dd4e63967286904e17d326a711e746f5b 100644 (file)
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+static void
+vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
+                struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+                unsigned int crtc_w, unsigned int crtc_h,
+                uint32_t x, uint32_t y,
+                uint32_t src_w, uint32_t src_h)
+{
+       struct drm_device *dev = dplane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(dplane);
+       int pipe = intel_plane->pipe;
+       int plane = intel_plane->plane;
+       u32 sprctl;
+       unsigned long sprsurf_offset, linear_offset;
+       int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+       sprctl = I915_READ(SPCNTR(pipe, plane));
+
+       /* Mask out pixel format bits in case we change it */
+       sprctl &= ~SP_PIXFORMAT_MASK;
+       sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
+       sprctl &= ~SP_TILED;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_YUYV:
+               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
+               break;
+       case DRM_FORMAT_YVYU:
+               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU;
+               break;
+       case DRM_FORMAT_UYVY:
+               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY;
+               break;
+       case DRM_FORMAT_VYUY:
+               sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
+               break;
+       case DRM_FORMAT_RGB565:
+               sprctl |= SP_FORMAT_BGR565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               sprctl |= SP_FORMAT_BGRX8888;
+               break;
+       case DRM_FORMAT_ARGB8888:
+               sprctl |= SP_FORMAT_BGRA8888;
+               break;
+       case DRM_FORMAT_XBGR2101010:
+               sprctl |= SP_FORMAT_RGBX1010102;
+               break;
+       case DRM_FORMAT_ABGR2101010:
+               sprctl |= SP_FORMAT_RGBA1010102;
+               break;
+       case DRM_FORMAT_XBGR8888:
+               sprctl |= SP_FORMAT_RGBX8888;
+               break;
+       case DRM_FORMAT_ABGR8888:
+               sprctl |= SP_FORMAT_RGBA8888;
+               break;
+       default:
+               /*
+                * If we get here one of the upper layers failed to filter
+                * out the unsupported plane formats
+                */
+               BUG();
+               break;
+       }
+
+       if (obj->tiling_mode != I915_TILING_NONE)
+               sprctl |= SP_TILED;
+
+       sprctl |= SP_ENABLE;
+
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
+
+       intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+       I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
+       I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+
+       linear_offset = y * fb->pitches[0] + x * pixel_size;
+       sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+                                                       obj->tiling_mode,
+                                                       pixel_size,
+                                                       fb->pitches[0]);
+       linear_offset -= sprsurf_offset;
+
+       if (obj->tiling_mode != I915_TILING_NONE)
+               I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
+       else
+               I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+
+       I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+       I915_WRITE(SPCNTR(pipe, plane), sprctl);
+       I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+                            sprsurf_offset);
+       POSTING_READ(SPSURF(pipe, plane));
+}
+
+static void
+vlv_disable_plane(struct drm_plane *dplane)
+{
+       struct drm_device *dev = dplane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(dplane);
+       int pipe = intel_plane->pipe;
+       int plane = intel_plane->plane;
+
+       I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
+                  ~SP_ENABLE);
+       /* Activate double buffered register update */
+       I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
+       POSTING_READ(SPSURF(pipe, plane));
+}
+
+static int
+vlv_update_colorkey(struct drm_plane *dplane,
+                   struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = dplane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(dplane);
+       int pipe = intel_plane->pipe;
+       int plane = intel_plane->plane;
+       u32 sprctl;
+
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               return -EINVAL;
+
+       I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
+       I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
+       I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+
+       sprctl = I915_READ(SPCNTR(pipe, plane));
+       sprctl &= ~SP_SOURCE_KEY;
+       if (key->flags & I915_SET_COLORKEY_SOURCE)
+               sprctl |= SP_SOURCE_KEY;
+       I915_WRITE(SPCNTR(pipe, plane), sprctl);
+
+       POSTING_READ(SPKEYMSK(pipe, plane));
+
+       return 0;
+}
+
+static void
+vlv_get_colorkey(struct drm_plane *dplane,
+                struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = dplane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(dplane);
+       int pipe = intel_plane->pipe;
+       int plane = intel_plane->plane;
+       u32 sprctl;
+
+       key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
+       key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
+       key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
+
+       sprctl = I915_READ(SPCNTR(pipe, plane));
+       if (sprctl & SP_SOURCE_KEY)
+               key->flags = I915_SET_COLORKEY_SOURCE;
+       else
+               key->flags = I915_SET_COLORKEY_NONE;
+}
+
 static void
 ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
                 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
@@ -441,6 +609,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
        old_obj = intel_plane->obj;
 
+       intel_plane->crtc_x = crtc_x;
+       intel_plane->crtc_y = crtc_y;
+       intel_plane->crtc_w = crtc_w;
+       intel_plane->crtc_h = crtc_h;
+       intel_plane->src_x = src_x;
+       intel_plane->src_y = src_y;
+       intel_plane->src_w = src_w;
+       intel_plane->src_h = src_h;
+
        src_w = src_w >> 16;
        src_h = src_h >> 16;
 
@@ -513,6 +690,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
        mutex_lock(&dev->struct_mutex);
 
+       /* Note that this will apply the VT-d workaround for scanouts,
+        * which is more restrictive than required for sprites. (The
+        * primary plane requires 256KiB alignment with 64 PTE padding,
+        * the sprite planes only require 128KiB alignment and 32 PTE padding.
+        * the sprite planes only require 128KiB alignment and 32 PTE padding.) */
        ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
        if (ret)
                goto out_unlock;
@@ -568,6 +750,8 @@ intel_disable_plane(struct drm_plane *plane)
        if (!intel_plane->obj)
                goto out;
 
+       intel_wait_for_vblank(dev, intel_plane->pipe);
+
        mutex_lock(&dev->struct_mutex);
        intel_unpin_fb_obj(intel_plane->obj);
        intel_plane->obj = NULL;
@@ -647,6 +831,20 @@ out_unlock:
        return ret;
 }
 
+void intel_plane_restore(struct drm_plane *plane)
+{
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+
+       if (!plane->crtc || !plane->fb)
+               return;
+
+       intel_update_plane(plane, plane->crtc, plane->fb,
+                          intel_plane->crtc_x, intel_plane->crtc_y,
+                          intel_plane->crtc_w, intel_plane->crtc_h,
+                          intel_plane->src_x, intel_plane->src_y,
+                          intel_plane->src_w, intel_plane->src_h);
+}
+
 static const struct drm_plane_funcs intel_plane_funcs = {
        .update_plane = intel_update_plane,
        .disable_plane = intel_disable_plane,
@@ -670,8 +868,22 @@ static uint32_t snb_plane_formats[] = {
        DRM_FORMAT_VYUY,
 };
 
+static uint32_t vlv_plane_formats[] = {
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_ABGR2101010,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+};
+
 int
-intel_plane_init(struct drm_device *dev, enum pipe pipe)
+intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 {
        struct intel_plane *intel_plane;
        unsigned long possible_crtcs;
@@ -710,14 +922,26 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
                        intel_plane->can_scale = false;
                else
                        intel_plane->can_scale = true;
-               intel_plane->max_downscale = 2;
-               intel_plane->update_plane = ivb_update_plane;
-               intel_plane->disable_plane = ivb_disable_plane;
-               intel_plane->update_colorkey = ivb_update_colorkey;
-               intel_plane->get_colorkey = ivb_get_colorkey;
-
-               plane_formats = snb_plane_formats;
-               num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+
+               if (IS_VALLEYVIEW(dev)) {
+                       intel_plane->max_downscale = 1;
+                       intel_plane->update_plane = vlv_update_plane;
+                       intel_plane->disable_plane = vlv_disable_plane;
+                       intel_plane->update_colorkey = vlv_update_colorkey;
+                       intel_plane->get_colorkey = vlv_get_colorkey;
+
+                       plane_formats = vlv_plane_formats;
+                       num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+               } else {
+                       intel_plane->max_downscale = 2;
+                       intel_plane->update_plane = ivb_update_plane;
+                       intel_plane->disable_plane = ivb_disable_plane;
+                       intel_plane->update_colorkey = ivb_update_colorkey;
+                       intel_plane->get_colorkey = ivb_get_colorkey;
+
+                       plane_formats = snb_plane_formats;
+                       num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+               }
                break;
 
        default:
@@ -726,6 +950,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
        }
 
        intel_plane->pipe = pipe;
+       intel_plane->plane = plane;
        possible_crtcs = (1 << pipe);
        ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
                             &intel_plane_funcs,
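For reference, intel_update_plane() now caches the crtc/src parameters in struct intel_plane and the new intel_plane_restore() simply replays them. A minimal, hypothetical caller (not part of this patch; the prototype is assumed to live in intel_drv.h elsewhere in the series) could walk the plane list after a full mode set:

#include <drm/drmP.h>

void intel_plane_restore(struct drm_plane *plane);	/* assumed from intel_drv.h */

/* Hypothetical sketch, not from this patch: intel_plane_restore() returns
 * early when a plane has no crtc or fb attached, so calling it for every
 * plane on the device once the CRTCs are back up is safe.
 */
static void example_restore_all_sprites(struct drm_device *dev)
{
	struct drm_plane *plane;

	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
		intel_plane_restore(plane);
}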
index d808421c1c808e2b0f49480d73968ebae26d07ed..66737265200f63bf0735d7a67ec8ca122211b6fe 100644 (file)
@@ -905,11 +905,10 @@ intel_tv_mode_valid(struct drm_connector *connector,
 
 
 static bool
-intel_tv_mode_fixup(struct drm_encoder *encoder,
-                   const struct drm_display_mode *mode,
-                   struct drm_display_mode *adjusted_mode)
+intel_tv_compute_config(struct intel_encoder *encoder,
+                       struct intel_crtc_config *pipe_config)
 {
-       struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+       struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
        if (!tv_mode)
@@ -918,7 +917,10 @@ intel_tv_mode_fixup(struct drm_encoder *encoder,
        if (intel_encoder_check_is_cloned(&intel_tv->base))
                return false;
 
-       adjusted_mode->clock = tv_mode->clock;
+       pipe_config->adjusted_mode.clock = tv_mode->clock;
+       DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+       pipe_config->pipe_bpp = 8*3;
+
        return true;
 }
 
@@ -1485,7 +1487,6 @@ out:
 }
 
 static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
-       .mode_fixup = intel_tv_mode_fixup,
        .mode_set = intel_tv_mode_set,
 };
 
@@ -1620,6 +1621,7 @@ intel_tv_init(struct drm_device *dev)
        drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
                         DRM_MODE_ENCODER_TVDAC);
 
+       intel_encoder->compute_config = intel_tv_compute_config;
        intel_encoder->enable = intel_enable_tv;
        intel_encoder->disable = intel_disable_tv;
        intel_encoder->get_hw_state = intel_tv_get_hw_state;
index 4d932c46725dda950a4b2cd197e1923fca18ab50..dcfc973e29f726dbffc281a63bc6e2dc08254257 100644 (file)
@@ -215,7 +215,7 @@ mgag200_bo(struct ttm_buffer_object *bo)
 {
        return container_of(bo, struct mgag200_bo, bo);
 }
-                               /* mga_crtc.c */
+                               /* mgag200_crtc.c */
 void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                             u16 blue, int regno);
 void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -225,7 +225,7 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 int mgag200_modeset_init(struct mga_device *mdev);
 void mgag200_modeset_fini(struct mga_device *mdev);
 
-                               /* mga_fbdev.c */
+                               /* mgag200_fb.c */
 int mgag200_fbdev_init(struct mga_device *mdev);
 void mgag200_fbdev_fini(struct mga_device *mdev);
 
@@ -254,7 +254,7 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle,
                         uint64_t *offset);
-                               /* mga_i2c.c */
+                               /* mgag200_i2c.c */
 struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
 void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
 
index d2253f6394817dc5a0c1cf22ffb64887d0452846..2ebe0f635b26ae197eb0d01b0fd26b5dd7916183 100644 (file)
@@ -249,7 +249,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
        struct mga_fbdev *mfbdev;
        int ret;
 
-       mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL);
+       mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
        if (!mfbdev)
                return -ENOMEM;
 
@@ -258,10 +258,9 @@ int mgag200_fbdev_init(struct mga_device *mdev)
 
        ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
                                 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
-       if (ret) {
-               kfree(mfbdev);
+       if (ret)
                return ret;
-       }
+
        drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
 
        /* disable all the possible outputs/crtcs before entering KMS mode */
@@ -278,6 +277,4 @@ void mgag200_fbdev_fini(struct mga_device *mdev)
                return;
 
        mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
-       kfree(mdev->mfbdev);
-       mdev->mfbdev = NULL;
 }
index 64297c72464f996404730df6f3ec2eb67beccc33..1f7ea057b2fc4b484bc88d10fe7eb8baa13f5d6d 100644 (file)
@@ -76,15 +76,6 @@ static const struct drm_mode_config_funcs mga_mode_funcs = {
        .fb_create = mgag200_user_framebuffer_create,
 };
 
-/* Unmap the framebuffer from the core and release the memory */
-static void mga_vram_fini(struct mga_device *mdev)
-{
-       pci_iounmap(mdev->dev->pdev, mdev->rmmio);
-       mdev->rmmio = NULL;
-       if (mdev->mc.vram_base)
-               release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
-}
-
 static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
 {
        int offset;
@@ -140,7 +131,7 @@ static int mga_vram_init(struct mga_device *mdev)
        remove_conflicting_framebuffers(aper, "mgafb", true);
        kfree(aper);
 
-       if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
+       if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
                                "mgadrmfb_vram")) {
                DRM_ERROR("can't reserve VRAM\n");
                return -ENXIO;
@@ -173,13 +164,13 @@ static int mgag200_device_init(struct drm_device *dev,
        mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
        mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
 
-       if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
+       if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
                                "mgadrmfb_mmio")) {
                DRM_ERROR("can't reserve mmio registers\n");
                return -ENOMEM;
        }
 
-       mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
+       mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
        if (mdev->rmmio == NULL)
                return -ENOMEM;
 
@@ -188,10 +179,8 @@ static int mgag200_device_init(struct drm_device *dev,
                mdev->reg_1e24 = RREG32(0x1e24);
 
        ret = mga_vram_init(mdev);
-       if (ret) {
-               release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+       if (ret)
                return ret;
-       }
 
        mdev->bpp_shifts[0] = 0;
        mdev->bpp_shifts[1] = 1;
@@ -200,12 +189,6 @@ static int mgag200_device_init(struct drm_device *dev,
        return 0;
 }
 
-void mgag200_device_fini(struct mga_device *mdev)
-{
-       release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
-       mga_vram_fini(mdev);
-}
-
 /*
  * Functions here will be called by the core once it's bound the driver to
  * a PCI device
@@ -217,7 +200,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
        struct mga_device *mdev;
        int r;
 
-       mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL);
+       mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
        if (mdev == NULL)
                return -ENOMEM;
        dev->dev_private = (void *)mdev;
@@ -258,8 +241,6 @@ int mgag200_driver_unload(struct drm_device *dev)
        mgag200_fbdev_fini(mdev);
        drm_mode_config_cleanup(dev);
        mgag200_mm_fini(mdev);
-       mgag200_device_fini(mdev);
-       kfree(mdev);
        dev->dev_private = NULL;
        return 0;
 }
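The mgag200 changes above lean on managed allocations (devm_* / pcim_*): the driver core releases them automatically when the device is unbound, which is why the explicit kfree()/release_mem_region() error and unload paths can be dropped. A reduced sketch of the pattern, with hypothetical names (example_priv, example_probe) used only for illustration:

#include <linux/pci.h>
#include <linux/device.h>

struct example_priv {
	void __iomem *mmio;
};

/* Sketch only: every resource below is tied to the device's lifetime, so
 * the error paths are bare returns and no unwind code is needed.
 */
static int example_probe(struct pci_dev *pdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, pci_resource_start(pdev, 1),
				     pci_resource_len(pdev, 1), "example_mmio"))
		return -ENXIO;

	priv->mmio = pcim_iomap(pdev, 1, 0);
	if (!priv->mmio)
		return -ENOMEM;

	pci_set_drvdata(pdev, priv);
	return 0;
}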
index 3b6dc883e150a4b8d3493cfd6104d7f827967e98..5eb3e0da7c6eb1eb213e527021668ddd96bccaa1 100644 (file)
@@ -391,7 +391,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_device *device = nv_device(drm->device);
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
-       struct nouveau_abi16_chan *chan, *temp;
+       struct nouveau_abi16_chan *chan = NULL, *temp;
        struct nouveau_abi16_ntfy *ntfy;
        struct nouveau_object *object;
        struct nv_dma_class args = {};
@@ -404,10 +404,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
        if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
                return nouveau_abi16_put(abi16, -EINVAL);
 
-       list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
-               if (chan->chan->handle == (NVDRM_CHAN | info->channel))
+       list_for_each_entry(temp, &abi16->channels, head) {
+               if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
+                       chan = temp;
                        break;
-               chan = NULL;
+               }
        }
 
        if (!chan)
@@ -459,17 +460,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
 {
        struct drm_nouveau_gpuobj_free *fini = data;
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
-       struct nouveau_abi16_chan *chan, *temp;
+       struct nouveau_abi16_chan *chan = NULL, *temp;
        struct nouveau_abi16_ntfy *ntfy;
        int ret;
 
        if (unlikely(!abi16))
                return -ENOMEM;
 
-       list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
-               if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
+       list_for_each_entry(temp, &abi16->channels, head) {
+               if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
+                       chan = temp;
                        break;
-               chan = NULL;
+               }
        }
 
        if (!chan)
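Both nouveau hunks above replace list_for_each_entry_safe() (whose cursor is not NULL after the loop when nothing matches) with a plain iteration that only publishes the cursor on a match. The same lookup pattern in isolation, with hypothetical item/handle names:

#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head head;
	u32 handle;
};

/* Sketch of the lookup pattern used above: iterate with a scratch cursor
 * and assign the result only on a match, so callers can test for NULL
 * instead of relying on the loop cursor's final value.
 */
static struct item *item_find(struct list_head *items, u32 handle)
{
	struct item *it, *found = NULL;

	list_for_each_entry(it, items, head) {
		if (it->handle == handle) {
			found = it;
			break;
		}
	}
	return found;
}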
index d1099365bfc1f47d6aa1be0e640f66c3fb66fcf2..c95decf543e904cbe89a6792fa9c709a2b87ad17 100644 (file)
@@ -71,12 +71,26 @@ module_param_named(modeset, nouveau_modeset, int, 0400);
 
 static struct drm_driver driver;
 
+static int
+nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
+{
+       struct nouveau_drm *drm =
+               container_of(event, struct nouveau_drm, vblank[head]);
+       drm_handle_vblank(drm->dev, head);
+       return NVKM_EVENT_KEEP;
+}
+
 static int
 nouveau_drm_vblank_enable(struct drm_device *dev, int head)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-       nouveau_event_get(pdisp->vblank, head, &drm->vblank);
+
+       if (WARN_ON_ONCE(head >= ARRAY_SIZE(drm->vblank)))
+               return -EIO;
+       WARN_ON_ONCE(drm->vblank[head].func);
+       drm->vblank[head].func = nouveau_drm_vblank_handler;
+       nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
        return 0;
 }
 
@@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-       nouveau_event_put(pdisp->vblank, head, &drm->vblank);
-}
-
-static int
-nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
-{
-       struct nouveau_drm *drm =
-               container_of(event, struct nouveau_drm, vblank);
-       drm_handle_vblank(drm->dev, head);
-       return NVKM_EVENT_KEEP;
+       if (drm->vblank[head].func)
+               nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
+       else
+               WARN_ON_ONCE(1);
+       drm->vblank[head].func = NULL;
 }
 
 static u64
@@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 
        dev->dev_private = drm;
        drm->dev = dev;
-       drm->vblank.func = nouveau_drm_vblank_handler;
 
        INIT_LIST_HEAD(&drm->clients);
        spin_lock_init(&drm->tile.lock);
index b25df374c901df36f1c6e9408225ef9a2ed4b813..9c39bafbef2c23680fa757fdd251e158a56ae931 100644 (file)
@@ -113,7 +113,7 @@ struct nouveau_drm {
        struct nvbios vbios;
        struct nouveau_display *display;
        struct backlight_device *backlight;
-       struct nouveau_eventh vblank;
+       struct nouveau_eventh vblank[4];
 
        /* power management */
        struct nouveau_pm *pm;
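The per-head handler above works because container_of() on an array element still recovers the enclosing structure; indexing vblank[] by head is enough to get back to the nouveau_drm instance. A stripped-down sketch with hypothetical eventh/drm_priv types:

#include <linux/kernel.h>

struct eventh {
	int (*func)(struct eventh *, int);
};

struct drm_priv {
	struct eventh vblank[4];
};

/* Sketch only: container_of() on &priv->vblank[head] yields the enclosing
 * drm_priv, which is how the handler finds its driver instance without a
 * separate back-pointer per head.
 */
static int example_vblank_handler(struct eventh *event, int head)
{
	struct drm_priv *priv =
		container_of(event, struct drm_priv, vblank[head]);

	(void)priv;	/* the real handler forwards to drm_handle_vblank() */
	return 0;
}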
index c451c41a7a7d2f5b2333dc2217a2adc2db061ecc..912759daf5625a88bb878f96a15f1ad39bd4c2a2 100644 (file)
@@ -110,6 +110,11 @@ static enum drm_connector_status omap_connector_detect(
                        ret = connector_status_connected;
                else
                        ret = connector_status_disconnected;
+       } else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI ||
+                       dssdev->type == OMAP_DISPLAY_TYPE_DBI ||
+                       dssdev->type == OMAP_DISPLAY_TYPE_SDI ||
+                       dssdev->type == OMAP_DISPLAY_TYPE_DSI) {
+               ret = connector_status_connected;
        } else {
                ret = connector_status_unknown;
        }
@@ -189,12 +194,30 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
        struct omap_video_timings timings = {0};
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *new_mode;
-       int ret = MODE_BAD;
+       int r, ret = MODE_BAD;
 
        copy_timings_drm_to_omap(&timings, mode);
        mode->vrefresh = drm_mode_vrefresh(mode);
 
-       if (!dssdrv->check_timings(dssdev, &timings)) {
+       /*
+        * if the panel driver doesn't have a check_timings, it's most likely
+        * a fixed resolution panel; check whether the timings match the
+        * panel's own timings
+        */
+       if (dssdrv->check_timings) {
+               r = dssdrv->check_timings(dssdev, &timings);
+       } else {
+               struct omap_video_timings t = {0};
+
+               dssdrv->get_timings(dssdev, &t);
+
+               if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
+                       r = -EINVAL;
+               else
+                       r = 0;
+       }
+
+       if (!r) {
                /* check if vrefresh is still valid */
                new_mode = drm_mode_duplicate(dev, mode);
                new_mode->clock = timings.pixel_clock;
index bec66a490b8f7ded93ff3f766c562a8a76967f2e..79b200aee18a50e4b82277a0e2b3e380ab8cf280 100644 (file)
@@ -74,6 +74,13 @@ struct omap_crtc {
        struct work_struct page_flip_work;
 };
 
+uint32_t pipe2vbl(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+       return dispc_mgr_get_vsync_irq(omap_crtc->channel);
+}
+
 /*
  * Manager-ops, callbacks from output when they need to configure
  * the upstream part of the video pipe.
@@ -613,7 +620,13 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
        omap_crtc->apply.pre_apply  = omap_crtc_pre_apply;
        omap_crtc->apply.post_apply = omap_crtc_post_apply;
 
-       omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+       omap_crtc->channel = channel;
+       omap_crtc->plane = plane;
+       omap_crtc->plane->crtc = crtc;
+       omap_crtc->name = channel_names[channel];
+       omap_crtc->pipe = id;
+
+       omap_crtc->apply_irq.irqmask = pipe2vbl(crtc);
        omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
 
        omap_crtc->error_irq.irqmask =
@@ -621,12 +634,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
        omap_crtc->error_irq.irq = omap_crtc_error_irq;
        omap_irq_register(dev, &omap_crtc->error_irq);
 
-       omap_crtc->channel = channel;
-       omap_crtc->plane = plane;
-       omap_crtc->plane->crtc = crtc;
-       omap_crtc->name = channel_names[channel];
-       omap_crtc->pipe = id;
-
        /* temporary: */
        omap_crtc->mgr.id = channel;
 
index 079c54c6f94c974c966a32524cbd95210ed2572f..9c53c25e5201763a4673734d5ce32d72043f5a35 100644 (file)
@@ -74,54 +74,53 @@ static int get_connector_type(struct omap_dss_device *dssdev)
        }
 }
 
+static bool channel_used(struct drm_device *dev, enum omap_channel channel)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       int i;
+
+       for (i = 0; i < priv->num_crtcs; i++) {
+               struct drm_crtc *crtc = priv->crtcs[i];
+
+               if (omap_crtc_channel(crtc) == channel)
+                       return true;
+       }
+
+       return false;
+}
+
 static int omap_modeset_init(struct drm_device *dev)
 {
        struct omap_drm_private *priv = dev->dev_private;
        struct omap_dss_device *dssdev = NULL;
        int num_ovls = dss_feat_get_num_ovls();
-       int id;
+       int num_mgrs = dss_feat_get_num_mgrs();
+       int num_crtcs;
+       int i, id = 0;
 
        drm_mode_config_init(dev);
 
        omap_drm_irq_install(dev);
 
        /*
-        * Create private planes and CRTCs for the last NUM_CRTCs overlay
-        * plus manager:
+        * We usually don't want to create a CRTC for each manager, at least
+        * not until we have a way to expose private planes to userspace.
+        * Otherwise there would not be enough video pipes left for drm planes.
+        * We use the num_crtc argument to limit the number of crtcs we create.
         */
-       for (id = 0; id < min(num_crtc, num_ovls); id++) {
-               struct drm_plane *plane;
-               struct drm_crtc *crtc;
-
-               plane = omap_plane_init(dev, id, true);
-               crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
+       num_crtcs = min3(num_crtc, num_mgrs, num_ovls);
 
-               BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
-               priv->crtcs[id] = crtc;
-               priv->num_crtcs++;
-
-               priv->planes[id] = plane;
-               priv->num_planes++;
-       }
-
-       /*
-        * Create normal planes for the remaining overlays:
-        */
-       for (; id < num_ovls; id++) {
-               struct drm_plane *plane = omap_plane_init(dev, id, false);
-
-               BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
-               priv->planes[priv->num_planes++] = plane;
-       }
+       dssdev = NULL;
 
        for_each_dss_dev(dssdev) {
                struct drm_connector *connector;
                struct drm_encoder *encoder;
+               enum omap_channel channel;
 
                if (!dssdev->driver) {
                        dev_warn(dev->dev, "%s has no driver.. skipping it\n",
                                        dssdev->name);
-                       return 0;
+                       continue;
                }
 
                if (!(dssdev->driver->get_timings ||
@@ -129,7 +128,7 @@ static int omap_modeset_init(struct drm_device *dev)
                        dev_warn(dev->dev, "%s driver does not support "
                                "get_timings or read_edid.. skipping it!\n",
                                dssdev->name);
-                       return 0;
+                       continue;
                }
 
                encoder = omap_encoder_init(dev, dssdev);
@@ -157,16 +156,118 @@ static int omap_modeset_init(struct drm_device *dev)
 
                drm_mode_connector_attach_encoder(connector, encoder);
 
+               /*
+                * if we have reached the limit of the crtcs we are allowed to
+                * create, let's not try to look for a crtc for this
+                * panel/encoder and onwards, we will, of course, populate the
+                * the possible_crtcs field for all the encoders with the final
+                * set of crtcs we create
+                */
+               if (id == num_crtcs)
+                       continue;
+
+               /*
+                * get the recommended DISPC channel for this encoder. For now,
+                * we only try to create a crtc out of the recommended one; the
+                * other possible channels to which the encoder can connect are
+                * not considered.
+                */
+               channel = dssdev->output->dispc_channel;
+
+               /*
+                * if this channel hasn't already been taken by a previously
+                * allocated crtc, we create a new crtc for it
+                */
+               if (!channel_used(dev, channel)) {
+                       struct drm_plane *plane;
+                       struct drm_crtc *crtc;
+
+                       plane = omap_plane_init(dev, id, true);
+                       crtc = omap_crtc_init(dev, plane, channel, id);
+
+                       BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+                       priv->crtcs[id] = crtc;
+                       priv->num_crtcs++;
+
+                       priv->planes[id] = plane;
+                       priv->num_planes++;
+
+                       id++;
+               }
+       }
+
+       /*
+        * we have allocated crtcs according to the needs of the panels/encoders;
+        * add more crtcs here if needed
+        */
+       for (; id < num_crtcs; id++) {
+
+               /* find a free manager for this crtc */
+               for (i = 0; i < num_mgrs; i++) {
+                       if (!channel_used(dev, i)) {
+                               struct drm_plane *plane;
+                               struct drm_crtc *crtc;
+
+                               plane = omap_plane_init(dev, id, true);
+                               crtc = omap_crtc_init(dev, plane, i, id);
+
+                               BUG_ON(priv->num_crtcs >=
+                                       ARRAY_SIZE(priv->crtcs));
+
+                               priv->crtcs[id] = crtc;
+                               priv->num_crtcs++;
+
+                               priv->planes[id] = plane;
+                               priv->num_planes++;
+
+                               break;
+                       }
+               }
+
+               if (i == num_mgrs) {
+                       /* this shouldn't really happen */
+                       dev_err(dev->dev, "no managers left for crtc\n");
+                       return -ENOMEM;
+               }
+       }
+
+       /*
+        * Create normal planes for the remaining overlays:
+        */
+       for (; id < num_ovls; id++) {
+               struct drm_plane *plane = omap_plane_init(dev, id, false);
+
+               BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+               priv->planes[priv->num_planes++] = plane;
+       }
+
+       for (i = 0; i < priv->num_encoders; i++) {
+               struct drm_encoder *encoder = priv->encoders[i];
+               struct omap_dss_device *dssdev =
+                                       omap_encoder_get_dssdev(encoder);
+
                /* figure out which crtc's we can connect the encoder to: */
                encoder->possible_crtcs = 0;
                for (id = 0; id < priv->num_crtcs; id++) {
-                       enum omap_dss_output_id supported_outputs =
-                                       dss_feat_get_supported_outputs(pipe2chan(id));
+                       struct drm_crtc *crtc = priv->crtcs[id];
+                       enum omap_channel crtc_channel;
+                       enum omap_dss_output_id supported_outputs;
+
+                       crtc_channel = omap_crtc_channel(crtc);
+                       supported_outputs =
+                               dss_feat_get_supported_outputs(crtc_channel);
+
                        if (supported_outputs & dssdev->output->id)
                                encoder->possible_crtcs |= (1 << id);
                }
        }
 
+       DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
+               priv->num_planes, priv->num_crtcs, priv->num_encoders,
+               priv->num_connectors);
+
        dev->mode_config.min_width = 32;
        dev->mode_config.min_height = 32;
 
@@ -303,7 +404,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
        return ret;
 }
 
-struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
        DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -567,7 +668,7 @@ static const struct dev_pm_ops omapdrm_pm_ops = {
 };
 #endif
 
-struct platform_driver pdev = {
+static struct platform_driver pdev = {
                .driver = {
                        .name = DRIVER_NAME,
                        .owner = THIS_MODULE,
index d4f997bb4ac0eaab02f6419d4c832246959bbb45..215a20dd340cc8984ddffad32b0c7c42010d06c1 100644 (file)
@@ -139,8 +139,8 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
 int omap_gem_resume(struct device *dev);
 #endif
 
-int omap_irq_enable_vblank(struct drm_device *dev, int crtc);
-void omap_irq_disable_vblank(struct drm_device *dev, int crtc);
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
 irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
 void omap_irq_preinstall(struct drm_device *dev);
 int omap_irq_postinstall(struct drm_device *dev);
@@ -271,39 +271,9 @@ static inline int align_pitch(int pitch, int width, int bpp)
        return ALIGN(pitch, 8 * bytespp);
 }
 
-static inline enum omap_channel pipe2chan(int pipe)
-{
-       int num_mgrs = dss_feat_get_num_mgrs();
-
-       /*
-        * We usually don't want to create a CRTC for each manager,
-        * at least not until we have a way to expose private planes
-        * to userspace.  Otherwise there would not be enough video
-        * pipes left for drm planes.  The higher #'d managers tend
-        * to have more features so start in reverse order.
-        */
-       return num_mgrs - pipe - 1;
-}
-
 /* map crtc to vblank mask */
-static inline uint32_t pipe2vbl(int crtc)
-{
-       enum omap_channel channel = pipe2chan(crtc);
-       return dispc_mgr_get_vsync_irq(channel);
-}
-
-static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++)
-               if (priv->crtcs[i] == crtc)
-                       return i;
-
-       BUG();  /* bogus CRTC ptr */
-       return -1;
-}
+uint32_t pipe2vbl(struct drm_crtc *crtc);
+struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
 
 /* should these be made into common util helpers?
  */
index 21d126d0317ebc31ad5b28938ac1ef0452e6bbe1..c29451ba65daeeb06c068c013c926e4652a5432f 100644 (file)
@@ -41,6 +41,13 @@ struct omap_encoder {
        struct omap_dss_device *dssdev;
 };
 
+struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
+{
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+
+       return omap_encoder->dssdev;
+}
+
 static void omap_encoder_destroy(struct drm_encoder *encoder)
 {
        struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -128,13 +135,26 @@ int omap_encoder_update(struct drm_encoder *encoder,
 
        dssdev->output->manager = mgr;
 
-       ret = dssdrv->check_timings(dssdev, timings);
+       if (dssdrv->check_timings) {
+               ret = dssdrv->check_timings(dssdev, timings);
+       } else {
+               struct omap_video_timings t = {0};
+
+               dssdrv->get_timings(dssdev, &t);
+
+               if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
+                       ret = -EINVAL;
+               else
+                       ret = 0;
+       }
+
        if (ret) {
                dev_err(dev->dev, "could not set timings: %d\n", ret);
                return ret;
        }
 
-       dssdrv->set_timings(dssdev, timings);
+       if (dssdrv->set_timings)
+               dssdrv->set_timings(dssdev, timings);
 
        return 0;
 }
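Both omapdrm hunks (connector mode_valid and encoder update) add the same fallback: when the panel driver provides no check_timings hook, treat it as a fixed-mode panel and accept only timings identical to what get_timings reports. Condensed into one hypothetical helper (not part of this patch) for clarity:

#include <linux/string.h>
#include <video/omapdss.h>

/* Hypothetical helper showing the fallback used in both hunks above. */
static int check_or_match_timings(struct omap_dss_device *dssdev,
				  struct omap_video_timings *timings)
{
	struct omap_dss_driver *dssdrv = dssdev->driver;
	struct omap_video_timings t = {0};

	if (dssdrv->check_timings)
		return dssdrv->check_timings(dssdev, timings);

	/* fixed-mode panel: only the panel's own timings are acceptable */
	dssdrv->get_timings(dssdev, &t);
	return memcmp(timings, &t, sizeof(t)) ? -EINVAL : 0;
}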
index ac74d1bc67bfc885e740e6adc809a0bd5c8d2b1d..0682cb5c01506576b71fe942bbb2127a3427e45c 100644 (file)
@@ -178,7 +178,7 @@ out_unlock:
        return omap_gem_mmap_obj(obj, vma);
 }
 
-struct dma_buf_ops omap_dmabuf_ops = {
+static struct dma_buf_ops omap_dmabuf_ops = {
                .map_dma_buf = omap_gem_map_dma_buf,
                .unmap_dma_buf = omap_gem_unmap_dma_buf,
                .release = omap_gem_dmabuf_release,
index e01303ee00c3b9d64ac48b4e5a956450f4920435..9263db117ff8ae937dbf743ac0e8a267264d95a4 100644 (file)
@@ -130,12 +130,13 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
  * Zero on success, appropriate errno if the given @crtc's vblank
  * interrupt cannot be enabled.
  */
-int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id)
 {
        struct omap_drm_private *priv = dev->dev_private;
+       struct drm_crtc *crtc = priv->crtcs[crtc_id];
        unsigned long flags;
 
-       DBG("dev=%p, crtc=%d", dev, crtc);
+       DBG("dev=%p, crtc=%d", dev, crtc_id);
 
        dispc_runtime_get();
        spin_lock_irqsave(&list_lock, flags);
@@ -156,12 +157,13 @@ int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
  * a hardware vblank counter, this routine should be a no-op, since
  * interrupts will have to stay on to keep the count accurate.
  */
-void omap_irq_disable_vblank(struct drm_device *dev, int crtc)
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
 {
        struct omap_drm_private *priv = dev->dev_private;
+       struct drm_crtc *crtc = priv->crtcs[crtc_id];
        unsigned long flags;
 
-       DBG("dev=%p, crtc=%d", dev, crtc);
+       DBG("dev=%p, crtc=%d", dev, crtc_id);
 
        dispc_runtime_get();
        spin_lock_irqsave(&list_lock, flags);
@@ -186,9 +188,12 @@ irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
 
        VERB("irqs: %08x", irqstatus);
 
-       for (id = 0; id < priv->num_crtcs; id++)
-               if (irqstatus & pipe2vbl(id))
+       for (id = 0; id < priv->num_crtcs; id++) {
+               struct drm_crtc *crtc = priv->crtcs[id];
+
+               if (irqstatus & pipe2vbl(crtc))
                        drm_handle_vblank(dev, id);
+       }
 
        spin_lock_irqsave(&list_lock, flags);
        list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
index 2882cda6ea19de38039f498a9a7fa31e488d92c7..8d225d7ff4e300319211fd8ec80eeccfb4e8fdc2 100644 (file)
@@ -247,6 +247,12 @@ static int omap_plane_update(struct drm_plane *plane,
 {
        struct omap_plane *omap_plane = to_omap_plane(plane);
        omap_plane->enabled = true;
+
+       if (plane->fb)
+               drm_framebuffer_unreference(plane->fb);
+
+       drm_framebuffer_reference(fb);
+
        return omap_plane_mode_set(plane, crtc, fb,
                        crtc_x, crtc_y, crtc_w, crtc_h,
                        src_x, src_y, src_w, src_h,
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
new file mode 100644 (file)
index 0000000..2f1a57e
--- /dev/null
@@ -0,0 +1,10 @@
+config DRM_QXL
+       tristate "QXL virtual GPU"
+       depends on DRM && PCI
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
+       select DRM_KMS_HELPER
+       select DRM_TTM
+       help
+         QXL virtual GPU for Spice virtualization desktop integration.
+         Do not enable this driver unless your distro ships a corresponding
+         X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile
new file mode 100644 (file)
index 0000000..ea046ba
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o
+
+obj-$(CONFIG_DRM_QXL) += qxl.o
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
new file mode 100644 (file)
index 0000000..736365e
--- /dev/null
@@ -0,0 +1,690 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+/* QXL cmd/ring handling */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
+
+struct ring {
+       struct qxl_ring_header      header;
+       uint8_t                     elements[0];
+};
+
+struct qxl_ring {
+       struct ring            *ring;
+       int                     element_size;
+       int                     n_elements;
+       int                     prod_notify;
+       wait_queue_head_t      *push_event;
+       spinlock_t             lock;
+};
+
+void qxl_ring_free(struct qxl_ring *ring)
+{
+       kfree(ring);
+}
+
+struct qxl_ring *
+qxl_ring_create(struct qxl_ring_header *header,
+               int element_size,
+               int n_elements,
+               int prod_notify,
+               bool set_prod_notify,
+               wait_queue_head_t *push_event)
+{
+       struct qxl_ring *ring;
+
+       ring = kmalloc(sizeof(*ring), GFP_KERNEL);
+       if (!ring)
+               return NULL;
+
+       ring->ring = (struct ring *)header;
+       ring->element_size = element_size;
+       ring->n_elements = n_elements;
+       ring->prod_notify = prod_notify;
+       ring->push_event = push_event;
+       if (set_prod_notify)
+               header->notify_on_prod = ring->n_elements;
+       spin_lock_init(&ring->lock);
+       return ring;
+}
+
+static int qxl_check_header(struct qxl_ring *ring)
+{
+       int ret;
+       struct qxl_ring_header *header = &(ring->ring->header);
+       unsigned long flags;
+       spin_lock_irqsave(&ring->lock, flags);
+       ret = header->prod - header->cons < header->num_items;
+       if (ret == 0)
+               header->notify_on_cons = header->cons + 1;
+       spin_unlock_irqrestore(&ring->lock, flags);
+       return ret;
+}
+
+static int qxl_check_idle(struct qxl_ring *ring)
+{
+       int ret;
+       struct qxl_ring_header *header = &(ring->ring->header);
+       unsigned long flags;
+       spin_lock_irqsave(&ring->lock, flags);
+       ret = header->prod == header->cons;
+       spin_unlock_irqrestore(&ring->lock, flags);
+       return ret;
+}
+
+int qxl_ring_push(struct qxl_ring *ring,
+                 const void *new_elt, bool interruptible)
+{
+       struct qxl_ring_header *header = &(ring->ring->header);
+       uint8_t *elt;
+       int idx, ret;
+       unsigned long flags;
+       spin_lock_irqsave(&ring->lock, flags);
+       if (header->prod - header->cons == header->num_items) {
+               header->notify_on_cons = header->cons + 1;
+               mb();
+               spin_unlock_irqrestore(&ring->lock, flags);
+               if (!drm_can_sleep()) {
+                       while (!qxl_check_header(ring))
+                               udelay(1);
+               } else {
+                       if (interruptible) {
+                               ret = wait_event_interruptible(*ring->push_event,
+                                                              qxl_check_header(ring));
+                               if (ret)
+                                       return ret;
+                       } else {
+                               wait_event(*ring->push_event,
+                                          qxl_check_header(ring));
+                       }
+
+               }
+               spin_lock_irqsave(&ring->lock, flags);
+       }
+
+       idx = header->prod & (ring->n_elements - 1);
+       elt = ring->ring->elements + idx * ring->element_size;
+
+       memcpy((void *)elt, new_elt, ring->element_size);
+
+       header->prod++;
+
+       mb();
+
+       if (header->prod == header->notify_on_prod)
+               outb(0, ring->prod_notify);
+
+       spin_unlock_irqrestore(&ring->lock, flags);
+       return 0;
+}
+
+static bool qxl_ring_pop(struct qxl_ring *ring,
+                        void *element)
+{
+       volatile struct qxl_ring_header *header = &(ring->ring->header);
+       volatile uint8_t *ring_elt;
+       int idx;
+       unsigned long flags;
+       spin_lock_irqsave(&ring->lock, flags);
+       if (header->cons == header->prod) {
+               header->notify_on_prod = header->cons + 1;
+               spin_unlock_irqrestore(&ring->lock, flags);
+               return false;
+       }
+
+       idx = header->cons & (ring->n_elements - 1);
+       ring_elt = ring->ring->elements + idx * ring->element_size;
+
+       memcpy(element, (void *)ring_elt, ring->element_size);
+
+       header->cons++;
+
+       spin_unlock_irqrestore(&ring->lock, flags);
+       return true;
+}
+
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+                             uint32_t type, bool interruptible)
+{
+       struct qxl_command cmd;
+
+       cmd.type = type;
+       cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+
+       return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
+}
+
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+                            uint32_t type, bool interruptible)
+{
+       struct qxl_command cmd;
+
+       cmd.type = type;
+       cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+
+       return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
+}
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
+{
+       if (!qxl_check_idle(qdev->release_ring)) {
+               queue_work(qdev->gc_queue, &qdev->gc_work);
+               if (flush)
+                       flush_work(&qdev->gc_work);
+               return true;
+       }
+       return false;
+}
+
+int qxl_garbage_collect(struct qxl_device *qdev)
+{
+       struct qxl_release *release;
+       uint64_t id, next_id;
+       int i = 0;
+       int ret;
+       union qxl_release_info *info;
+
+       while (qxl_ring_pop(qdev->release_ring, &id)) {
+               QXL_INFO(qdev, "popped %lld\n", id);
+               while (id) {
+                       release = qxl_release_from_id_locked(qdev, id);
+                       if (release == NULL)
+                               break;
+
+                       ret = qxl_release_reserve(qdev, release, false);
+                       if (ret) {
+                               qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
+                               DRM_ERROR("failed to reserve release %lld\n", id);
+                       }
+
+                       info = qxl_release_map(qdev, release);
+                       next_id = info->next;
+                       qxl_release_unmap(qdev, release, info);
+
+                       qxl_release_unreserve(qdev, release);
+                       QXL_INFO(qdev, "popped %lld, next %lld\n", id,
+                               next_id);
+
+                       switch (release->type) {
+                       case QXL_RELEASE_DRAWABLE:
+                       case QXL_RELEASE_SURFACE_CMD:
+                       case QXL_RELEASE_CURSOR_CMD:
+                               break;
+                       default:
+                               DRM_ERROR("unexpected release type\n");
+                               break;
+                       }
+                       id = next_id;
+
+                       qxl_release_free(qdev, release);
+                       ++i;
+               }
+       }
+
+       QXL_INFO(qdev, "%s: %lld\n", __func__, i);
+
+       return i;
+}
+
+int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+                         struct qxl_bo **_bo)
+{
+       struct qxl_bo *bo;
+       int ret;
+
+       ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
+                           QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+       if (ret) {
+               DRM_ERROR("failed to allocate VRAM BO\n");
+               return ret;
+       }
+       ret = qxl_bo_reserve(bo, false);
+       if (unlikely(ret != 0))
+               goto out_unref;
+
+       *_bo = bo;
+       return 0;
+out_unref:
+       qxl_bo_unref(&bo);
+       return ret;
+}
+
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+{
+       int irq_num;
+       long addr = qdev->io_base + port;
+       int ret;
+
+       mutex_lock(&qdev->async_io_mutex);
+       irq_num = atomic_read(&qdev->irq_received_io_cmd);
+
+
+       if (qdev->last_sent_io_cmd > irq_num) {
+               ret = wait_event_interruptible(qdev->io_cmd_event,
+                                              atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+               if (ret)
+                       goto out;
+               irq_num = atomic_read(&qdev->irq_received_io_cmd);
+       }
+       outb(val, addr);
+       qdev->last_sent_io_cmd = irq_num + 1;
+       ret = wait_event_interruptible(qdev->io_cmd_event,
+                                      atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+out:
+       mutex_unlock(&qdev->async_io_mutex);
+       return ret;
+}
+
+static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
+{
+       int ret;
+
+restart:
+       ret = wait_for_io_cmd_user(qdev, val, port);
+       if (ret == -ERESTARTSYS)
+               goto restart;
+}
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+                       const struct qxl_rect *area)
+{
+       int surface_id;
+       uint32_t surface_width, surface_height;
+       int ret;
+
+       if (!surf->hw_surf_alloc)
+               DRM_ERROR("got io update area with no hw surface\n");
+
+       if (surf->is_primary)
+               surface_id = 0;
+       else
+               surface_id = surf->surface_id;
+       surface_width = surf->surf.width;
+       surface_height = surf->surf.height;
+
+       if (area->left < 0 || area->top < 0 ||
+           area->right > surface_width || area->bottom > surface_height) {
+               qxl_io_log(qdev, "%s: not doing area update for "
+                          "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
+                          area->top, area->right, area->bottom, surface_width, surface_height);
+               return -EINVAL;
+       }
+       mutex_lock(&qdev->update_area_mutex);
+       qdev->ram_header->update_area = *area;
+       qdev->ram_header->update_surface = surface_id;
+       ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+       mutex_unlock(&qdev->update_area_mutex);
+       return ret;
+}
+
+void qxl_io_notify_oom(struct qxl_device *qdev)
+{
+       outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
+}
+
+void qxl_io_flush_release(struct qxl_device *qdev)
+{
+       outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
+}
+
+void qxl_io_flush_surfaces(struct qxl_device *qdev)
+{
+       wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
+}
+
+
+void qxl_io_destroy_primary(struct qxl_device *qdev)
+{
+       wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
+}
+
+void qxl_io_create_primary(struct qxl_device *qdev, unsigned width,
+                          unsigned height, unsigned offset, struct qxl_bo *bo)
+{
+       struct qxl_surface_create *create;
+
+       QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
+                qdev->ram_header);
+       create = &qdev->ram_header->create_surface;
+       create->format = bo->surf.format;
+       create->width = width;
+       create->height = height;
+       create->stride = bo->surf.stride;
+       create->mem = qxl_bo_physical_address(qdev, bo, offset);
+
+       QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
+                bo->kptr);
+
+       create->flags = QXL_SURF_FLAG_KEEP_DATA;
+       create->type = QXL_SURF_TYPE_PRIMARY;
+
+       wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
+}
+
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
+{
+       QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
+       wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
+}
+
+void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
+{
+       va_list args;
+
+       va_start(args, fmt);
+       vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
+       va_end(args);
+       /*
+        * DO not do a DRM output here - this will call printk, which will
+        * call back into qxl for rendering (qxl_fb)
+        */
+       outb(0, qdev->io_base + QXL_IO_LOG);
+}
+
+void qxl_io_reset(struct qxl_device *qdev)
+{
+       outb(0, qdev->io_base + QXL_IO_RESET);
+}
+
+void qxl_io_monitors_config(struct qxl_device *qdev)
+{
+       qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
+                  qdev->monitors_config ?
+                  qdev->monitors_config->count : -1,
+                  qdev->monitors_config && qdev->monitors_config->count ?
+                  qdev->monitors_config->heads[0].width : -1,
+                  qdev->monitors_config && qdev->monitors_config->count ?
+                  qdev->monitors_config->heads[0].height : -1,
+                  qdev->monitors_config && qdev->monitors_config->count ?
+                  qdev->monitors_config->heads[0].x : -1,
+                  qdev->monitors_config && qdev->monitors_config->count ?
+                  qdev->monitors_config->heads[0].y : -1
+                  );
+
+       wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
+}
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+                     struct qxl_bo *surf)
+{
+       uint32_t handle = -ENOMEM;
+       int idr_ret;
+       int count = 0;
+again:
+       if (idr_pre_get(&qdev->surf_id_idr, GFP_ATOMIC) == 0) {
+               DRM_ERROR("Out of memory for surf idr\n");
+               kfree(surf);
+               goto alloc_fail;
+       }
+
+       spin_lock(&qdev->surf_id_idr_lock);
+       idr_ret = idr_get_new_above(&qdev->surf_id_idr, NULL, 1, &handle);
+       spin_unlock(&qdev->surf_id_idr_lock);
+
+       if (idr_ret == -EAGAIN)
+               goto again;
+
+       if (handle >= qdev->rom->n_surfaces) {
+               count++;
+               spin_lock(&qdev->surf_id_idr_lock);
+               idr_remove(&qdev->surf_id_idr, handle);
+               spin_unlock(&qdev->surf_id_idr_lock);
+               qxl_reap_surface_id(qdev, 2);
+               goto again;
+       }
+       surf->surface_id = handle;
+
+       spin_lock(&qdev->surf_id_idr_lock);
+       qdev->last_alloced_surf_id = handle;
+       spin_unlock(&qdev->surf_id_idr_lock);
+ alloc_fail:
+       return 0;
+}
+
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+                           uint32_t surface_id)
+{
+       spin_lock(&qdev->surf_id_idr_lock);
+       idr_remove(&qdev->surf_id_idr, surface_id);
+       spin_unlock(&qdev->surf_id_idr_lock);
+}
+
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+                        struct qxl_bo *surf,
+                        struct ttm_mem_reg *new_mem)
+{
+       struct qxl_surface_cmd *cmd;
+       struct qxl_release *release;
+       int ret;
+
+       if (surf->hw_surf_alloc)
+               return 0;
+
+       ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
+                                                NULL,
+                                                &release);
+       if (ret)
+               return ret;
+
+       cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_SURFACE_CMD_CREATE;
+       cmd->u.surface_create.format = surf->surf.format;
+       cmd->u.surface_create.width = surf->surf.width;
+       cmd->u.surface_create.height = surf->surf.height;
+       cmd->u.surface_create.stride = surf->surf.stride;
+       if (new_mem) {
+               int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
+               struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+
+               /* TODO - need to hold one of the locks to read tbo.offset */
+               cmd->u.surface_create.data = slot->high_bits;
+
+               cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
+       } else
+               cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
+       cmd->surface_id = surf->surface_id;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       surf->surf_create = release;
+
+       /* no need to add a release to the fence for this bo,
+          since it is only released when we ask to destroy the surface
+          and it would never signal otherwise */
+       qxl_fence_releaseable(qdev, release);
+
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+       qxl_release_unreserve(qdev, release);
+
+       surf->hw_surf_alloc = true;
+       spin_lock(&qdev->surf_id_idr_lock);
+       idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
+       spin_unlock(&qdev->surf_id_idr_lock);
+       return 0;
+}
+
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+                          struct qxl_bo *surf)
+{
+       struct qxl_surface_cmd *cmd;
+       struct qxl_release *release;
+       int ret;
+       int id;
+
+       if (!surf->hw_surf_alloc)
+               return 0;
+
+       ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
+                                                surf->surf_create,
+                                                &release);
+       if (ret)
+               return ret;
+
+       surf->surf_create = NULL;
+       /* remove the surface from the idr, but not the surface id yet */
+       spin_lock(&qdev->surf_id_idr_lock);
+       idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
+       spin_unlock(&qdev->surf_id_idr_lock);
+       surf->hw_surf_alloc = false;
+
+       id = surf->surface_id;
+       surf->surface_id = 0;
+
+       release->surface_release_id = id;
+       cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_SURFACE_CMD_DESTROY;
+       cmd->surface_id = id;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       qxl_fence_releaseable(qdev, release);
+
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+       qxl_release_unreserve(qdev, release);
+
+
+       return 0;
+}
+
+int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+{
+       struct qxl_rect rect;
+       int ret;
+
+       /* if we are evicting, we need to make sure the surface is up
+          to date */
+       rect.left = 0;
+       rect.right = surf->surf.width;
+       rect.top = 0;
+       rect.bottom = surf->surf.height;
+retry:
+       ret = qxl_io_update_area(qdev, surf, &rect);
+       if (ret == -ERESTARTSYS)
+               goto retry;
+       return ret;
+}
+
+static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+       /* no need to update area if we are just freeing the surface normally */
+       if (do_update_area)
+               qxl_update_surface(qdev, surf);
+
+       /* nuke the surface id at the hw */
+       qxl_hw_surface_dealloc(qdev, surf);
+}
+
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+       mutex_lock(&qdev->surf_evict_mutex);
+       qxl_surface_evict_locked(qdev, surf, do_update_area);
+       mutex_unlock(&qdev->surf_evict_mutex);
+}
+
+static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
+{
+       int ret;
+
+       ret = qxl_bo_reserve(surf, false);
+       if (ret == -EBUSY)
+               return -EBUSY;
+
+       if (surf->fence.num_active_releases > 0 && stall == false) {
+               qxl_bo_unreserve(surf);
+               return -EBUSY;
+       }
+
+       if (stall)
+               mutex_unlock(&qdev->surf_evict_mutex);
+
+       spin_lock(&surf->tbo.bdev->fence_lock);
+       ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
+       spin_unlock(&surf->tbo.bdev->fence_lock);
+
+       if (stall)
+               mutex_lock(&qdev->surf_evict_mutex);
+       if (ret == -EBUSY) {
+               qxl_bo_unreserve(surf);
+               return -EBUSY;
+       }
+
+       qxl_surface_evict_locked(qdev, surf, true);
+       qxl_bo_unreserve(surf);
+       return 0;
+}
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
+{
+       int num_reaped = 0;
+       int i, ret;
+       bool stall = false;
+       int start = 0;
+
+       mutex_lock(&qdev->surf_evict_mutex);
+again:
+
+       spin_lock(&qdev->surf_id_idr_lock);
+       start = qdev->last_alloced_surf_id + 1;
+       spin_unlock(&qdev->surf_id_idr_lock);
+
+       for (i = start; i < start + qdev->rom->n_surfaces; i++) {
+               void *objptr;
+               int surfid = i % qdev->rom->n_surfaces;
+
+               /* this avoids the case where the object is in the
+                  idr but has been evicted halfway - it makes
+                  the idr lookup atomic with the eviction */
+               spin_lock(&qdev->surf_id_idr_lock);
+               objptr = idr_find(&qdev->surf_id_idr, surfid);
+               spin_unlock(&qdev->surf_id_idr_lock);
+
+               if (!objptr)
+                       continue;
+
+               ret = qxl_reap_surf(qdev, objptr, stall);
+               if (ret == 0)
+                       num_reaped++;
+               if (num_reaped >= max_to_reap)
+                       break;
+       }
+       if (num_reaped == 0 && stall == false) {
+               stall = true;
+               goto again;
+       }
+
+       mutex_unlock(&qdev->surf_evict_mutex);
+       if (num_reaped) {
+               usleep_range(500, 1000);
+               qxl_queue_garbage_collect(qdev, true);
+       }
+
+       return 0;
+}
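qxl_ring_push()/qxl_ring_pop() above index the shared ring by masking free-running prod/cons counters with (n_elements - 1), which assumes the element count is a power of two. The same scheme in a self-contained sketch (plain C, hypothetical RING_SLOTS size, no QXL types):

#include <stdint.h>

#define RING_SLOTS 16			/* must be a power of two */

struct example_ring {
	uint32_t prod, cons;		/* free-running counters */
	uint32_t elem[RING_SLOTS];
};

/* Sketch of the prod/cons masking used above: the counters only ever
 * increase and the slot index is counter & (slots - 1), so the ring is
 * full when prod - cons == RING_SLOTS and empty when prod == cons.
 */
static int example_push(struct example_ring *r, uint32_t v)
{
	if (r->prod - r->cons == RING_SLOTS)
		return -1;				/* full */
	r->elem[r->prod & (RING_SLOTS - 1)] = v;
	r->prod++;
	return 0;
}

static int example_pop(struct example_ring *r, uint32_t *v)
{
	if (r->cons == r->prod)
		return -1;				/* empty */
	*v = r->elem[r->cons & (RING_SLOTS - 1)];
	r->cons++;
	return 0;
}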
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
new file mode 100644 (file)
index 0000000..c630152
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *  Alon Levy <alevy@redhat.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "drmP.h"
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+
+static int
+qxl_debugfs_irq_received(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct qxl_device *qdev = node->minor->dev->dev_private;
+
+       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
+       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
+       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
+       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
+       seq_printf(m, "%d\n", qdev->irq_received_error);
+       return 0;
+}
+
+static int
+qxl_debugfs_buffers_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct qxl_device *qdev = node->minor->dev->dev_private;
+       struct qxl_bo *bo;
+
+       list_for_each_entry(bo, &qdev->gem.objects, list) {
+               seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
+                          (unsigned long)bo->gem_base.size, bo->pin_count,
+                          bo->tbo.sync_obj, bo->fence.num_active_releases);
+       }
+       return 0;
+}
+
+static struct drm_info_list qxl_debugfs_list[] = {
+       { "irq_received", qxl_debugfs_irq_received, 0, NULL },
+       { "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
+};
+#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
+
+int
+qxl_debugfs_init(struct drm_minor *minor)
+{
+       drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+                                minor->debugfs_root, minor);
+       return 0;
+}
+
+void
+qxl_debugfs_takedown(struct drm_minor *minor)
+{
+       drm_debugfs_remove_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+                                minor);
+}
+
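+/*
+ * Register a component's debugfs files on both the control and primary
+ * minors.  A file list that has already been registered is skipped.
+ */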
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+                         struct drm_info_list *files,
+                         unsigned nfiles)
+{
+       unsigned i;
+
+       for (i = 0; i < qdev->debugfs_count; i++) {
+               if (qdev->debugfs[i].files == files) {
+                       /* Already registered */
+                       return 0;
+               }
+       }
+
+       i = qdev->debugfs_count + 1;
+       if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
+               DRM_ERROR("Reached maximum number of debugfs components.\n");
+               DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
+               return -EINVAL;
+       }
+       qdev->debugfs[qdev->debugfs_count].files = files;
+       qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
+       qdev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+       drm_debugfs_create_files(files, nfiles,
+                                qdev->ddev->control->debugfs_root,
+                                qdev->ddev->control);
+       drm_debugfs_create_files(files, nfiles,
+                                qdev->ddev->primary->debugfs_root,
+                                qdev->ddev->primary);
+#endif
+       return 0;
+}
+
+void qxl_debugfs_remove_files(struct qxl_device *qdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       unsigned i;
+
+       for (i = 0; i < qdev->debugfs_count; i++) {
+               drm_debugfs_remove_files(qdev->debugfs[i].files,
+                                        qdev->debugfs[i].num_files,
+                                        qdev->ddev->control);
+               drm_debugfs_remove_files(qdev->debugfs[i].files,
+                                        qdev->debugfs[i].num_files,
+                                        qdev->ddev->primary);
+       }
+#endif
+}
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
new file mode 100644 (file)
index 0000000..94c5aec
--- /dev/null
@@ -0,0 +1,879 @@
+/*
+   Copyright (C) 2009 Red Hat, Inc.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+        notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in
+        the documentation and/or other materials provided with the
+        distribution.
+       * Neither the name of the copyright holder nor the names of its
+        contributors may be used to endorse or promote products derived
+        from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
+   IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+   TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+   PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+#ifndef H_QXL_DEV
+#define H_QXL_DEV
+
+#include <linux/types.h>
+
+/*
+ * from spice-protocol
+ * Release 0.10.0
+ */
+
+/* enums.h */
+
+enum SpiceImageType {
+       SPICE_IMAGE_TYPE_BITMAP,
+       SPICE_IMAGE_TYPE_QUIC,
+       SPICE_IMAGE_TYPE_RESERVED,
+       SPICE_IMAGE_TYPE_LZ_PLT = 100,
+       SPICE_IMAGE_TYPE_LZ_RGB,
+       SPICE_IMAGE_TYPE_GLZ_RGB,
+       SPICE_IMAGE_TYPE_FROM_CACHE,
+       SPICE_IMAGE_TYPE_SURFACE,
+       SPICE_IMAGE_TYPE_JPEG,
+       SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS,
+       SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB,
+       SPICE_IMAGE_TYPE_JPEG_ALPHA,
+
+       SPICE_IMAGE_TYPE_ENUM_END
+};
+
+enum SpiceBitmapFmt {
+       SPICE_BITMAP_FMT_INVALID,
+       SPICE_BITMAP_FMT_1BIT_LE,
+       SPICE_BITMAP_FMT_1BIT_BE,
+       SPICE_BITMAP_FMT_4BIT_LE,
+       SPICE_BITMAP_FMT_4BIT_BE,
+       SPICE_BITMAP_FMT_8BIT,
+       SPICE_BITMAP_FMT_16BIT,
+       SPICE_BITMAP_FMT_24BIT,
+       SPICE_BITMAP_FMT_32BIT,
+       SPICE_BITMAP_FMT_RGBA,
+
+       SPICE_BITMAP_FMT_ENUM_END
+};
+
+enum SpiceSurfaceFmt {
+       SPICE_SURFACE_FMT_INVALID,
+       SPICE_SURFACE_FMT_1_A,
+       SPICE_SURFACE_FMT_8_A = 8,
+       SPICE_SURFACE_FMT_16_555 = 16,
+       SPICE_SURFACE_FMT_32_xRGB = 32,
+       SPICE_SURFACE_FMT_16_565 = 80,
+       SPICE_SURFACE_FMT_32_ARGB = 96,
+
+       SPICE_SURFACE_FMT_ENUM_END
+};
+
+enum SpiceClipType {
+       SPICE_CLIP_TYPE_NONE,
+       SPICE_CLIP_TYPE_RECTS,
+
+       SPICE_CLIP_TYPE_ENUM_END
+};
+
+enum SpiceRopd {
+       SPICE_ROPD_INVERS_SRC = (1 << 0),
+       SPICE_ROPD_INVERS_BRUSH = (1 << 1),
+       SPICE_ROPD_INVERS_DEST = (1 << 2),
+       SPICE_ROPD_OP_PUT = (1 << 3),
+       SPICE_ROPD_OP_OR = (1 << 4),
+       SPICE_ROPD_OP_AND = (1 << 5),
+       SPICE_ROPD_OP_XOR = (1 << 6),
+       SPICE_ROPD_OP_BLACKNESS = (1 << 7),
+       SPICE_ROPD_OP_WHITENESS = (1 << 8),
+       SPICE_ROPD_OP_INVERS = (1 << 9),
+       SPICE_ROPD_INVERS_RES = (1 << 10),
+
+       SPICE_ROPD_MASK = 0x7ff
+};
+
+enum SpiceBrushType {
+       SPICE_BRUSH_TYPE_NONE,
+       SPICE_BRUSH_TYPE_SOLID,
+       SPICE_BRUSH_TYPE_PATTERN,
+
+       SPICE_BRUSH_TYPE_ENUM_END
+};
+
+enum SpiceCursorType {
+       SPICE_CURSOR_TYPE_ALPHA,
+       SPICE_CURSOR_TYPE_MONO,
+       SPICE_CURSOR_TYPE_COLOR4,
+       SPICE_CURSOR_TYPE_COLOR8,
+       SPICE_CURSOR_TYPE_COLOR16,
+       SPICE_CURSOR_TYPE_COLOR24,
+       SPICE_CURSOR_TYPE_COLOR32,
+
+       SPICE_CURSOR_TYPE_ENUM_END
+};
+
+/* qxl_dev.h */
+
+#pragma pack(push, 1)
+
+#define REDHAT_PCI_VENDOR_ID 0x1b36
+
+/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */
+#define QXL_DEVICE_ID_STABLE 0x0100
+
+enum {
+       QXL_REVISION_STABLE_V04 = 0x01,
+       QXL_REVISION_STABLE_V06 = 0x02,
+       QXL_REVISION_STABLE_V10 = 0x03,
+       QXL_REVISION_STABLE_V12 = 0x04,
+};
+
+#define QXL_DEVICE_ID_DEVEL 0x01ff
+#define QXL_REVISION_DEVEL 0x01
+
+#define QXL_ROM_MAGIC (*(uint32_t *)"QXRO")
+#define QXL_RAM_MAGIC (*(uint32_t *)"QXRA")
+
+enum {
+       QXL_RAM_RANGE_INDEX,
+       QXL_VRAM_RANGE_INDEX,
+       QXL_ROM_RANGE_INDEX,
+       QXL_IO_RANGE_INDEX,
+
+       QXL_PCI_RANGES
+};
+
+/* qxl-1 compat: append only */
+enum {
+       QXL_IO_NOTIFY_CMD,
+       QXL_IO_NOTIFY_CURSOR,
+       QXL_IO_UPDATE_AREA,
+       QXL_IO_UPDATE_IRQ,
+       QXL_IO_NOTIFY_OOM,
+       QXL_IO_RESET,
+       QXL_IO_SET_MODE,                  /* qxl-1 */
+       QXL_IO_LOG,
+       /* appended for qxl-2 */
+       QXL_IO_MEMSLOT_ADD,
+       QXL_IO_MEMSLOT_DEL,
+       QXL_IO_DETACH_PRIMARY,
+       QXL_IO_ATTACH_PRIMARY,
+       QXL_IO_CREATE_PRIMARY,
+       QXL_IO_DESTROY_PRIMARY,
+       QXL_IO_DESTROY_SURFACE_WAIT,
+       QXL_IO_DESTROY_ALL_SURFACES,
+       /* appended for qxl-3 */
+       QXL_IO_UPDATE_AREA_ASYNC,
+       QXL_IO_MEMSLOT_ADD_ASYNC,
+       QXL_IO_CREATE_PRIMARY_ASYNC,
+       QXL_IO_DESTROY_PRIMARY_ASYNC,
+       QXL_IO_DESTROY_SURFACE_ASYNC,
+       QXL_IO_DESTROY_ALL_SURFACES_ASYNC,
+       QXL_IO_FLUSH_SURFACES_ASYNC,
+       QXL_IO_FLUSH_RELEASE,
+       /* appended for qxl-4 */
+       QXL_IO_MONITORS_CONFIG_ASYNC,
+
+       QXL_IO_RANGE_SIZE
+};
+
+typedef uint64_t QXLPHYSICAL;
+typedef int32_t QXLFIXED; /* fixed 28.4 */
+
+struct qxl_point_fix {
+       QXLFIXED x;
+       QXLFIXED y;
+};
+
+struct qxl_point {
+       int32_t x;
+       int32_t y;
+};
+
+struct qxl_point_1_6 {
+       int16_t x;
+       int16_t y;
+};
+
+struct qxl_rect {
+       int32_t top;
+       int32_t left;
+       int32_t bottom;
+       int32_t right;
+};
+
+struct qxl_urect {
+       uint32_t top;
+       uint32_t left;
+       uint32_t bottom;
+       uint32_t right;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_rom {
+       uint32_t magic;
+       uint32_t id;
+       uint32_t update_id;
+       uint32_t compression_level;
+       uint32_t log_level;
+       uint32_t mode;                    /* qxl-1 */
+       uint32_t modes_offset;
+       uint32_t num_io_pages;
+       uint32_t pages_offset;            /* qxl-1 */
+       uint32_t draw_area_offset;        /* qxl-1 */
+       uint32_t surface0_area_size;      /* qxl-1 name: draw_area_size */
+       uint32_t ram_header_offset;
+       uint32_t mm_clock;
+       /* appended for qxl-2 */
+       uint32_t n_surfaces;
+       uint64_t flags;
+       uint8_t slots_start;
+       uint8_t slots_end;
+       uint8_t slot_gen_bits;
+       uint8_t slot_id_bits;
+       uint8_t slot_generation;
+       /* appended for qxl-4 */
+       uint8_t client_present;
+       uint8_t client_capabilities[58];
+       uint32_t client_monitors_config_crc;
+       struct {
+               uint16_t count;
+               uint16_t padding;
+               struct qxl_urect heads[64];
+       } client_monitors_config;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_mode {
+       uint32_t id;
+       uint32_t x_res;
+       uint32_t y_res;
+       uint32_t bits;
+       uint32_t stride;
+       uint32_t x_mili;
+       uint32_t y_mili;
+       uint32_t orientation;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_modes {
+       uint32_t n_modes;
+       struct qxl_mode modes[0];
+};
+
+/* qxl-1 compat: append only */
+enum qxl_cmd_type {
+       QXL_CMD_NOP,
+       QXL_CMD_DRAW,
+       QXL_CMD_UPDATE,
+       QXL_CMD_CURSOR,
+       QXL_CMD_MESSAGE,
+       QXL_CMD_SURFACE,
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_command {
+       QXLPHYSICAL data;
+       uint32_t type;
+       uint32_t padding;
+};
+
+#define QXL_COMMAND_FLAG_COMPAT                (1<<0)
+#define QXL_COMMAND_FLAG_COMPAT_16BPP  (2<<0)
+
+struct qxl_command_ext {
+       struct qxl_command cmd;
+       uint32_t group_id;
+       uint32_t flags;
+};
+
+struct qxl_mem_slot {
+       uint64_t mem_start;
+       uint64_t mem_end;
+};
+
+#define QXL_SURF_TYPE_PRIMARY     0
+
+#define QXL_SURF_FLAG_KEEP_DATA           (1 << 0)
+
+struct qxl_surface_create {
+       uint32_t width;
+       uint32_t height;
+       int32_t stride;
+       uint32_t format;
+       uint32_t position;
+       uint32_t mouse_mode;
+       uint32_t flags;
+       uint32_t type;
+       QXLPHYSICAL mem;
+};
+
+#define QXL_COMMAND_RING_SIZE 32
+#define QXL_CURSOR_RING_SIZE 32
+#define QXL_RELEASE_RING_SIZE 8
+
+#define QXL_LOG_BUF_SIZE 4096
+
+#define QXL_INTERRUPT_DISPLAY (1 << 0)
+#define QXL_INTERRUPT_CURSOR (1 << 1)
+#define QXL_INTERRUPT_IO_CMD (1 << 2)
+#define QXL_INTERRUPT_ERROR  (1 << 3)
+#define QXL_INTERRUPT_CLIENT (1 << 4)
+#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG  (1 << 5)
+
+struct qxl_ring_header {
+       uint32_t num_items;
+       uint32_t prod;
+       uint32_t notify_on_prod;
+       uint32_t cons;
+       uint32_t notify_on_cons;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_ram_header {
+       uint32_t magic;
+       uint32_t int_pending;
+       uint32_t int_mask;
+       uint8_t log_buf[QXL_LOG_BUF_SIZE];
+       struct qxl_ring_header  cmd_ring_hdr;
+       struct qxl_command      cmd_ring[QXL_COMMAND_RING_SIZE];
+       struct qxl_ring_header  cursor_ring_hdr;
+       struct qxl_command      cursor_ring[QXL_CURSOR_RING_SIZE];
+       struct qxl_ring_header  release_ring_hdr;
+       uint64_t                release_ring[QXL_RELEASE_RING_SIZE];
+       struct qxl_rect update_area;
+       /* appended for qxl-2 */
+       uint32_t update_surface;
+       struct qxl_mem_slot mem_slot;
+       struct qxl_surface_create create_surface;
+       uint64_t flags;
+
+       /* appended for qxl-4 */
+
+       /* used by QXL_IO_MONITORS_CONFIG_ASYNC */
+       QXLPHYSICAL monitors_config;
+       uint8_t guest_capabilities[64];
+};
+
+union qxl_release_info {
+       uint64_t id;      /* in  */
+       uint64_t next;    /* out */
+};
+
+struct qxl_release_info_ext {
+       union qxl_release_info *info;
+       uint32_t group_id;
+};
+
+struct qxl_data_chunk {
+       uint32_t data_size;
+       QXLPHYSICAL prev_chunk;
+       QXLPHYSICAL next_chunk;
+       uint8_t data[0];
+};
+
+struct qxl_message {
+       union qxl_release_info release_info;
+       uint8_t data[0];
+};
+
+struct qxl_compat_update_cmd {
+       union qxl_release_info release_info;
+       struct qxl_rect area;
+       uint32_t update_id;
+};
+
+struct qxl_update_cmd {
+       union qxl_release_info release_info;
+       struct qxl_rect area;
+       uint32_t update_id;
+       uint32_t surface_id;
+};
+
+struct qxl_cursor_header {
+       uint64_t unique;
+       uint16_t type;
+       uint16_t width;
+       uint16_t height;
+       uint16_t hot_spot_x;
+       uint16_t hot_spot_y;
+};
+
+struct qxl_cursor {
+       struct qxl_cursor_header header;
+       uint32_t data_size;
+       struct qxl_data_chunk chunk;
+};
+
+enum {
+       QXL_CURSOR_SET,
+       QXL_CURSOR_MOVE,
+       QXL_CURSOR_HIDE,
+       QXL_CURSOR_TRAIL,
+};
+
+#define QXL_CURSOR_DEVICE_DATA_SIZE 128
+
+struct qxl_cursor_cmd {
+       union qxl_release_info release_info;
+       uint8_t type;
+       union {
+               struct {
+                       struct qxl_point_1_6 position;
+                       uint8_t visible;
+                       QXLPHYSICAL shape;
+               } set;
+               struct {
+                       uint16_t length;
+                       uint16_t frequency;
+               } trail;
+               struct qxl_point_1_6 position;
+       } u;
+       /* todo: dynamic size from rom */
+       uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE];
+};
+
+enum {
+       QXL_DRAW_NOP,
+       QXL_DRAW_FILL,
+       QXL_DRAW_OPAQUE,
+       QXL_DRAW_COPY,
+       QXL_COPY_BITS,
+       QXL_DRAW_BLEND,
+       QXL_DRAW_BLACKNESS,
+       QXL_DRAW_WHITENESS,
+       QXL_DRAW_INVERS,
+       QXL_DRAW_ROP3,
+       QXL_DRAW_STROKE,
+       QXL_DRAW_TEXT,
+       QXL_DRAW_TRANSPARENT,
+       QXL_DRAW_ALPHA_BLEND,
+       QXL_DRAW_COMPOSITE
+};
+
+struct qxl_raster_glyph {
+       struct qxl_point render_pos;
+       struct qxl_point glyph_origin;
+       uint16_t width;
+       uint16_t height;
+       uint8_t data[0];
+};
+
+struct qxl_string {
+       uint32_t data_size;
+       uint16_t length;
+       uint16_t flags;
+       struct qxl_data_chunk chunk;
+};
+
+struct qxl_copy_bits {
+       struct qxl_point src_pos;
+};
+
+enum qxl_effect_type {
+       QXL_EFFECT_BLEND = 0,
+       QXL_EFFECT_OPAQUE = 1,
+       QXL_EFFECT_REVERT_ON_DUP = 2,
+       QXL_EFFECT_BLACKNESS_ON_DUP = 3,
+       QXL_EFFECT_WHITENESS_ON_DUP = 4,
+       QXL_EFFECT_NOP_ON_DUP = 5,
+       QXL_EFFECT_NOP = 6,
+       QXL_EFFECT_OPAQUE_BRUSH = 7
+};
+
+struct qxl_pattern {
+       QXLPHYSICAL pat;
+       struct qxl_point pos;
+};
+
+struct qxl_brush {
+       uint32_t type;
+       union {
+               uint32_t color;
+               struct qxl_pattern pattern;
+       } u;
+};
+
+struct qxl_q_mask {
+       uint8_t flags;
+       struct qxl_point pos;
+       QXLPHYSICAL bitmap;
+};
+
+struct qxl_fill {
+       struct qxl_brush brush;
+       uint16_t rop_descriptor;
+       struct qxl_q_mask mask;
+};
+
+struct qxl_opaque {
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+       struct qxl_brush brush;
+       uint16_t rop_descriptor;
+       uint8_t scale_mode;
+       struct qxl_q_mask mask;
+};
+
+struct qxl_copy {
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+       uint16_t rop_descriptor;
+       uint8_t scale_mode;
+       struct qxl_q_mask mask;
+};
+
+struct qxl_transparent {
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+       uint32_t src_color;
+       uint32_t true_color;
+};
+
+struct qxl_alpha_blend {
+       uint16_t alpha_flags;
+       uint8_t alpha;
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+};
+
+struct qxl_compat_alpha_blend {
+       uint8_t alpha;
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+};
+
+struct qxl_rop_3 {
+       QXLPHYSICAL src_bitmap;
+       struct qxl_rect src_area;
+       struct qxl_brush brush;
+       uint8_t rop3;
+       uint8_t scale_mode;
+       struct qxl_q_mask mask;
+};
+
+struct qxl_line_attr {
+       uint8_t flags;
+       uint8_t join_style;
+       uint8_t end_style;
+       uint8_t style_nseg;
+       QXLFIXED width;
+       QXLFIXED miter_limit;
+       QXLPHYSICAL style;
+};
+
+struct qxl_stroke {
+       QXLPHYSICAL path;
+       struct qxl_line_attr attr;
+       struct qxl_brush brush;
+       uint16_t fore_mode;
+       uint16_t back_mode;
+};
+
+struct qxl_text {
+       QXLPHYSICAL str;
+       struct qxl_rect back_area;
+       struct qxl_brush fore_brush;
+       struct qxl_brush back_brush;
+       uint16_t fore_mode;
+       uint16_t back_mode;
+};
+
+struct qxl_mask {
+       struct qxl_q_mask mask;
+};
+
+struct qxl_clip {
+       uint32_t type;
+       QXLPHYSICAL data;
+};
+
+enum qxl_operator {
+       QXL_OP_CLEAR                     = 0x00,
+       QXL_OP_SOURCE                    = 0x01,
+       QXL_OP_DST                       = 0x02,
+       QXL_OP_OVER                      = 0x03,
+       QXL_OP_OVER_REVERSE              = 0x04,
+       QXL_OP_IN                        = 0x05,
+       QXL_OP_IN_REVERSE                = 0x06,
+       QXL_OP_OUT                       = 0x07,
+       QXL_OP_OUT_REVERSE               = 0x08,
+       QXL_OP_ATOP                      = 0x09,
+       QXL_OP_ATOP_REVERSE              = 0x0a,
+       QXL_OP_XOR                       = 0x0b,
+       QXL_OP_ADD                       = 0x0c,
+       QXL_OP_SATURATE                  = 0x0d,
+       /* Note the jump here from 0x0d to 0x30 */
+       QXL_OP_MULTIPLY                  = 0x30,
+       QXL_OP_SCREEN                    = 0x31,
+       QXL_OP_OVERLAY                   = 0x32,
+       QXL_OP_DARKEN                    = 0x33,
+       QXL_OP_LIGHTEN                   = 0x34,
+       QXL_OP_COLOR_DODGE               = 0x35,
+       QXL_OP_COLOR_BURN                = 0x36,
+       QXL_OP_HARD_LIGHT                = 0x37,
+       QXL_OP_SOFT_LIGHT                = 0x38,
+       QXL_OP_DIFFERENCE                = 0x39,
+       QXL_OP_EXCLUSION                 = 0x3a,
+       QXL_OP_HSL_HUE                   = 0x3b,
+       QXL_OP_HSL_SATURATION            = 0x3c,
+       QXL_OP_HSL_COLOR                 = 0x3d,
+       QXL_OP_HSL_LUMINOSITY            = 0x3e
+};
+
+struct qxl_transform {
+       uint32_t        t00;
+       uint32_t        t01;
+       uint32_t        t02;
+       uint32_t        t10;
+       uint32_t        t11;
+       uint32_t        t12;
+};
+
+/* The flags field has the following bit fields:
+ *
+ *     operator:               [  0 -  7 ]
+ *     src_filter:             [  8 - 10 ]
+ *     mask_filter:            [ 11 - 13 ]
+ *     src_repeat:             [ 14 - 15 ]
+ *     mask_repeat:            [ 16 - 17 ]
+ *     component_alpha:        [ 18 - 18 ]
+ *     reserved:               [ 19 - 31 ]
+ *
+ * The repeat and filter values are those of pixman:
+ *             REPEAT_NONE =           0
+ *             REPEAT_NORMAL =         1
+ *             REPEAT_PAD =            2
+ *             REPEAT_REFLECT =        3
+ *
+ * The filter values are:
+ *             FILTER_NEAREST =        0
+ *             FILTER_BILINEAR =       1
+ */
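+/* Example encoding: an OVER operation with a bilinear-filtered source is
+ * flags = QXL_OP_OVER | (1 << 8), i.e. pixman FILTER_BILINEAR in the
+ * src_filter bits. */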
+struct qxl_composite {
+       uint32_t                flags;
+
+       QXLPHYSICAL                     src;
+       QXLPHYSICAL                     src_transform;  /* May be NULL */
+       QXLPHYSICAL                     mask;           /* May be NULL */
+       QXLPHYSICAL                     mask_transform; /* May be NULL */
+       struct qxl_point_1_6    src_origin;
+       struct qxl_point_1_6    mask_origin;
+};
+
+struct qxl_compat_drawable {
+       union qxl_release_info release_info;
+       uint8_t effect;
+       uint8_t type;
+       uint16_t bitmap_offset;
+       struct qxl_rect bitmap_area;
+       struct qxl_rect bbox;
+       struct qxl_clip clip;
+       uint32_t mm_time;
+       union {
+               struct qxl_fill fill;
+               struct qxl_opaque opaque;
+               struct qxl_copy copy;
+               struct qxl_transparent transparent;
+               struct qxl_compat_alpha_blend alpha_blend;
+               struct qxl_copy_bits copy_bits;
+               struct qxl_copy blend;
+               struct qxl_rop_3 rop3;
+               struct qxl_stroke stroke;
+               struct qxl_text text;
+               struct qxl_mask blackness;
+               struct qxl_mask invers;
+               struct qxl_mask whiteness;
+       } u;
+};
+
+struct qxl_drawable {
+       union qxl_release_info release_info;
+       uint32_t surface_id;
+       uint8_t effect;
+       uint8_t type;
+       uint8_t self_bitmap;
+       struct qxl_rect self_bitmap_area;
+       struct qxl_rect bbox;
+       struct qxl_clip clip;
+       uint32_t mm_time;
+       int32_t surfaces_dest[3];
+       struct qxl_rect surfaces_rects[3];
+       union {
+               struct qxl_fill fill;
+               struct qxl_opaque opaque;
+               struct qxl_copy copy;
+               struct qxl_transparent transparent;
+               struct qxl_alpha_blend alpha_blend;
+               struct qxl_copy_bits copy_bits;
+               struct qxl_copy blend;
+               struct qxl_rop_3 rop3;
+               struct qxl_stroke stroke;
+               struct qxl_text text;
+               struct qxl_mask blackness;
+               struct qxl_mask invers;
+               struct qxl_mask whiteness;
+               struct qxl_composite composite;
+       } u;
+};
+
+enum qxl_surface_cmd_type {
+       QXL_SURFACE_CMD_CREATE,
+       QXL_SURFACE_CMD_DESTROY,
+};
+
+struct qxl_surface {
+       uint32_t format;
+       uint32_t width;
+       uint32_t height;
+       int32_t stride;
+       QXLPHYSICAL data;
+};
+
+struct qxl_surface_cmd {
+       union qxl_release_info release_info;
+       uint32_t surface_id;
+       uint8_t type;
+       uint32_t flags;
+       union {
+               struct qxl_surface surface_create;
+       } u;
+};
+
+struct qxl_clip_rects {
+       uint32_t num_rects;
+       struct qxl_data_chunk chunk;
+};
+
+enum {
+       QXL_PATH_BEGIN = (1 << 0),
+       QXL_PATH_END = (1 << 1),
+       QXL_PATH_CLOSE = (1 << 3),
+       QXL_PATH_BEZIER = (1 << 4),
+};
+
+struct qxl_path_seg {
+       uint32_t flags;
+       uint32_t count;
+       struct qxl_point_fix points[0];
+};
+
+struct qxl_path {
+       uint32_t data_size;
+       struct qxl_data_chunk chunk;
+};
+
+enum {
+       QXL_IMAGE_GROUP_DRIVER,
+       QXL_IMAGE_GROUP_DEVICE,
+       QXL_IMAGE_GROUP_RED,
+       QXL_IMAGE_GROUP_DRIVER_DONT_CACHE,
+};
+
+struct qxl_image_id {
+       uint32_t group;
+       uint32_t unique;
+};
+
+union qxl_image_id_union {
+       struct qxl_image_id id;
+       uint64_t value;
+};
+
+enum qxl_image_flags {
+       QXL_IMAGE_CACHE = (1 << 0),
+       QXL_IMAGE_HIGH_BITS_SET = (1 << 1),
+};
+
+enum qxl_bitmap_flags {
+       QXL_BITMAP_DIRECT = (1 << 0),
+       QXL_BITMAP_UNSTABLE = (1 << 1),
+       QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */
+};
+
+#define QXL_SET_IMAGE_ID(image, _group, _unique) {              \
+       (image)->descriptor.id = (((uint64_t)_unique) << 32) | _group;  \
+}
+
+struct qxl_image_descriptor {
+       uint64_t id;
+       uint8_t type;
+       uint8_t flags;
+       uint32_t width;
+       uint32_t height;
+};
+
+struct qxl_palette {
+       uint64_t unique;
+       uint16_t num_ents;
+       uint32_t ents[0];
+};
+
+struct qxl_bitmap {
+       uint8_t format;
+       uint8_t flags;
+       uint32_t x;
+       uint32_t y;
+       uint32_t stride;
+       QXLPHYSICAL palette;
+       QXLPHYSICAL data; /* data[0] ? */
+};
+
+struct qxl_surface_id {
+       uint32_t surface_id;
+};
+
+struct qxl_encoder_data {
+       uint32_t data_size;
+       uint8_t data[0];
+};
+
+struct qxl_image {
+       struct qxl_image_descriptor descriptor;
+       union { /* variable length */
+               struct qxl_bitmap bitmap;
+               struct qxl_encoder_data quic;
+               struct qxl_surface_id surface_image;
+       } u;
+};
+
+/* A QXLHead is a single monitor output backed by a QXLSurface.
+ * The x and y offsets are unsigned because they are relative to the
+ * given surface, not to the x, y coordinates in the guest screen
+ * reference frame. */
+struct qxl_head {
+       uint32_t id;
+       uint32_t surface_id;
+       uint32_t width;
+       uint32_t height;
+       uint32_t x;
+       uint32_t y;
+       uint32_t flags;
+};
+
+struct qxl_monitors_config {
+       uint16_t count;
+       uint16_t max_allowed; /* If it is 0, no fixed limit is given by the
+                                driver */
+       struct qxl_head heads[0];
+};
+
+#pragma pack(pop)
+
+#endif /* H_QXL_DEV */
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
new file mode 100644 (file)
index 0000000..fcfd443
--- /dev/null
@@ -0,0 +1,982 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+
+#include <linux/crc32.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+#include "drm_crtc_helper.h"
+
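+/*
+ * Replace the connector's probed modes with a single preferred CVT mode
+ * matching the given head, falling back to 1024x768 for implausible sizes.
+ */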
+static void qxl_crtc_set_to_mode(struct qxl_device *qdev,
+                                struct drm_connector *connector,
+                                struct qxl_head *head)
+               seq_printf(m, "size %lu, pc %d, sync obj %p, num releases %d\n",
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode, *t;
+       int width = head->width;
+       int height = head->height;
+
+       if (width < 320 || height < 240) {
+               qxl_io_log(qdev, "%s: bad head: %dx%d",
+                          __func__, width, height);
+               width = 1024;
+               height = 768;
+       }
+       if (width * height * 4 > 16*1024*1024) {
+               width = 1024;
+               height = 768;
+       }
+       /* TODO: go over regular modes and removed preferred? */
+       list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+               drm_mode_remove(connector, mode);
+       mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
+       mode->type |= DRM_MODE_TYPE_PREFERRED;
+       mode->status = MODE_OK;
+       drm_mode_probed_add(connector, mode);
+       qxl_io_log(qdev, "%s: %d x %d\n", __func__, width, height);
+}
+
+void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev)
+{
+       struct drm_connector *connector;
+       int i;
+       struct drm_device *dev = qdev->ddev;
+
+       i = 0;
+       qxl_io_log(qdev, "%s: %d, %d\n", __func__,
+                  dev->mode_config.num_connector,
+                  qdev->monitors_config->count);
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (i > qdev->monitors_config->count) {
+                       /* crtc will be reported as disabled */
+                       continue;
+               }
+               qxl_crtc_set_to_mode(qdev, connector,
+                                    &qdev->monitors_config->heads[i]);
+               ++i;
+       }
+}
+
+void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+{
+       if (qdev->client_monitors_config &&
+           count > qdev->client_monitors_config->count) {
+               kfree(qdev->client_monitors_config);
+               qdev->client_monitors_config = NULL;
+       }
+       if (!qdev->client_monitors_config) {
+               qdev->client_monitors_config = kzalloc(
+                               sizeof(struct qxl_monitors_config) +
+                               sizeof(struct qxl_head) * count, GFP_KERNEL);
+               if (!qdev->client_monitors_config) {
+                       qxl_io_log(qdev,
+                                  "%s: allocation failure for %u heads\n",
+                                  __func__, count);
+                       return;
+               }
+       }
+       qdev->client_monitors_config->count = count;
+}
+
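+/*
+ * Copy the client monitors config out of the device ROM, validating its CRC
+ * first.  Returns nonzero on a CRC mismatch so the caller can retry once the
+ * device has finished updating the data.
+ */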
+static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
+{
+       int i;
+       int num_monitors;
+       uint32_t crc;
+
+       BUG_ON(!qdev->monitors_config);
+       num_monitors = qdev->rom->client_monitors_config.count;
+       crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
+                 sizeof(qdev->rom->client_monitors_config));
+       if (crc != qdev->rom->client_monitors_config_crc) {
+               qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc,
+                          sizeof(qdev->rom->client_monitors_config),
+                          qdev->rom->client_monitors_config_crc);
+               return 1;
+       }
+       if (num_monitors > qdev->monitors_config->max_allowed) {
+               DRM_INFO("client monitors list will be truncated: %d < %d\n",
+                        qdev->monitors_config->max_allowed, num_monitors);
+               num_monitors = qdev->monitors_config->max_allowed;
+       } else {
+               num_monitors = qdev->rom->client_monitors_config.count;
+       }
+       qxl_alloc_client_monitors_config(qdev, num_monitors);
+       /* we copy max from the client but it isn't used */
+       qdev->client_monitors_config->max_allowed =
+                               qdev->monitors_config->max_allowed;
+       for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
+               struct qxl_urect *c_rect =
+                       &qdev->rom->client_monitors_config.heads[i];
+               struct qxl_head *client_head =
+                       &qdev->client_monitors_config->heads[i];
+               struct qxl_head *head = &qdev->monitors_config->heads[i];
+               client_head->x = head->x = c_rect->left;
+               client_head->y = head->y = c_rect->top;
+               client_head->width = head->width =
+                                               c_rect->right - c_rect->left;
+               client_head->height = head->height =
+                                               c_rect->bottom - c_rect->top;
+               client_head->surface_id = head->surface_id = 0;
+               client_head->id = head->id = i;
+               client_head->flags = head->flags = 0;
+               QXL_DEBUG(qdev, "read %dx%d+%d+%d\n", head->width, head->height,
+                         head->x, head->y);
+       }
+       return 0;
+}
+
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
+{
+
+       while (qxl_display_copy_rom_client_monitors_config(qdev)) {
+               qxl_io_log(qdev, "failed crc check for client_monitors_config,"
+                                " retrying\n");
+       }
+       qxl_crtc_set_from_monitors_config(qdev);
+       /* fire off a uevent and let userspace tell us what to do */
+       qxl_io_log(qdev, "calling drm_sysfs_hotplug_event\n");
+       drm_sysfs_hotplug_event(qdev->ddev);
+}
+
+static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_output *output = drm_connector_to_qxl_output(connector);
+       int h = output->index;
+       struct drm_display_mode *mode = NULL;
+       struct qxl_head *head;
+
+       if (!qdev->monitors_config)
+               return 0;
+       head = &qdev->monitors_config->heads[h];
+
+       mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
+                           false);
+       mode->type |= DRM_MODE_TYPE_PREFERRED;
+       drm_mode_probed_add(connector, mode);
+       return 1;
+}
+
+static int qxl_add_common_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode = NULL;
+       int i;
+       struct mode_size {
+               int w;
+               int h;
+       } common_modes[] = {
+               { 640,  480},
+               { 720,  480},
+               { 800,  600},
+               { 848,  480},
+               {1024,  768},
+               {1152,  768},
+               {1280,  720},
+               {1280,  800},
+               {1280,  854},
+               {1280,  960},
+               {1280, 1024},
+               {1440,  900},
+               {1400, 1050},
+               {1680, 1050},
+               {1600, 1200},
+               {1920, 1080},
+               {1920, 1200}
+       };
+
+       for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+               if (common_modes[i].w < 320 || common_modes[i].h < 200)
+                       continue;
+
+               mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
+                                   60, false, false, false);
+               if (common_modes[i].w == 1024 && common_modes[i].h == 768)
+                       mode->type |= DRM_MODE_TYPE_PREFERRED;
+               drm_mode_probed_add(connector, mode);
+       }
+       return i - 1;
+}
+
+static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                              u16 *blue, uint32_t start, uint32_t size)
+{
+       /* TODO */
+}
+
+static void qxl_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
+
+       drm_crtc_cleanup(crtc);
+       kfree(qxl_crtc);
+}
+
+static void
+qxl_hide_cursor(struct qxl_device *qdev)
+{
+       struct qxl_release *release;
+       struct qxl_cursor_cmd *cmd;
+       int ret;
+
+       ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
+                                        &release, NULL);
+
+       cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_CURSOR_HIDE;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+       qxl_release_unreserve(qdev, release);
+}
+
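+/*
+ * Copy the userspace cursor bo into a device-visible cursor bo and queue a
+ * QXL_CURSOR_SET command on the cursor ring.  A zero handle hides the cursor.
+ */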
+static int qxl_crtc_cursor_set(struct drm_crtc *crtc,
+                              struct drm_file *file_priv,
+                              uint32_t handle,
+                              uint32_t width,
+                              uint32_t height)
+{
+       struct drm_device *dev = crtc->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+       struct drm_gem_object *obj;
+       struct qxl_cursor *cursor;
+       struct qxl_cursor_cmd *cmd;
+       struct qxl_bo *cursor_bo, *user_bo;
+       struct qxl_release *release;
+       void *user_ptr;
+
+       int size = 64*64*4;
+       int ret = 0;
+       if (!handle) {
+               qxl_hide_cursor(qdev);
+               return 0;
+       }
+
+       obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+       if (!obj) {
+               DRM_ERROR("cannot find cursor object\n");
+               return -ENOENT;
+       }
+
+       user_bo = gem_to_qxl_bo(obj);
+
+       ret = qxl_bo_reserve(user_bo, false);
+       if (ret)
+               goto out_unref;
+
+       ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+       if (ret)
+               goto out_unreserve;
+
+       ret = qxl_bo_kmap(user_bo, &user_ptr);
+       if (ret)
+               goto out_unpin;
+
+       ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+                                        QXL_RELEASE_CURSOR_CMD,
+                                        &release, NULL);
+       if (ret)
+               goto out_kunmap;
+       ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
+                                   &cursor_bo);
+       if (ret)
+               goto out_free_release;
+       ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+       if (ret)
+               goto out_free_bo;
+
+       cursor->header.unique = 0;
+       cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
+       cursor->header.width = 64;
+       cursor->header.height = 64;
+       cursor->header.hot_spot_x = 0;
+       cursor->header.hot_spot_y = 0;
+       cursor->data_size = size;
+       cursor->chunk.next_chunk = 0;
+       cursor->chunk.prev_chunk = 0;
+       cursor->chunk.data_size = size;
+
+       memcpy(cursor->chunk.data, user_ptr, size);
+
+       qxl_bo_kunmap(cursor_bo);
+
+       /* finish with the userspace bo */
+       qxl_bo_kunmap(user_bo);
+       qxl_bo_unpin(user_bo);
+       qxl_bo_unreserve(user_bo);
+       drm_gem_object_unreference_unlocked(obj);
+
+       cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_CURSOR_SET;
+       cmd->u.set.position.x = qcrtc->cur_x;
+       cmd->u.set.position.y = qcrtc->cur_y;
+
+       cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
+       qxl_release_add_res(qdev, release, cursor_bo);
+
+       cmd->u.set.visible = 1;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+       qxl_release_unreserve(qdev, release);
+
+       qxl_bo_unreserve(cursor_bo);
+       qxl_bo_unref(&cursor_bo);
+
+       return ret;
+out_free_bo:
+       qxl_bo_unref(&cursor_bo);
+out_free_release:
+       qxl_release_unreserve(qdev, release);
+       qxl_release_free(qdev, release);
+out_kunmap:
+       qxl_bo_kunmap(user_bo);
+out_unpin:
+       qxl_bo_unpin(user_bo);
+out_unreserve:
+       qxl_bo_unreserve(user_bo);
+out_unref:
+       drm_gem_object_unreference_unlocked(obj);
+       return ret;
+}
+
+static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
+                               int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+       struct qxl_release *release;
+       struct qxl_cursor_cmd *cmd;
+       int ret;
+
+       ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
+                                  &release, NULL);
+
+       qcrtc->cur_x = x;
+       qcrtc->cur_y = y;
+
+       cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+       cmd->type = QXL_CURSOR_MOVE;
+       cmd->u.position.x = qcrtc->cur_x;
+       cmd->u.position.y = qcrtc->cur_y;
+       qxl_release_unmap(qdev, release, &cmd->release_info);
+
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+       qxl_release_unreserve(qdev, release);
+       return 0;
+}
+
+
+static const struct drm_crtc_funcs qxl_crtc_funcs = {
+       .cursor_set = qxl_crtc_cursor_set,
+       .cursor_move = qxl_crtc_cursor_move,
+       .gamma_set = qxl_crtc_gamma_set,
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = qxl_crtc_destroy,
+};
+
+static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+       struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+
+       if (qxl_fb->obj)
+               drm_gem_object_unreference_unlocked(qxl_fb->obj);
+       drm_framebuffer_cleanup(fb);
+       kfree(qxl_fb);
+}
+
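+/*
+ * Flush dirty regions of a userspace framebuffer to the device.  Without clip
+ * rects the whole framebuffer is flushed; for DIRTY_ANNOTATE_COPY pairs the
+ * source rects are skipped.
+ */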
+static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
+                                        struct drm_file *file_priv,
+                                        unsigned flags, unsigned color,
+                                        struct drm_clip_rect *clips,
+                                        unsigned num_clips)
+{
+       /* TODO: vmwgfx where this was cribbed from had locking. Why? */
+       struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+       struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
+       struct drm_clip_rect norect;
+       struct qxl_bo *qobj;
+       int inc = 1;
+
+       qobj = gem_to_qxl_bo(qxl_fb->obj);
+       if (qxl_fb != qdev->active_user_framebuffer) {
+               DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n",
+                       __func__, qxl_fb, qdev->active_user_framebuffer);
+       }
+       if (!num_clips) {
+               num_clips = 1;
+               clips = &norect;
+               norect.x1 = norect.y1 = 0;
+               norect.x2 = fb->width;
+               norect.y2 = fb->height;
+       } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+               num_clips /= 2;
+               inc = 2; /* skip source rects */
+       }
+
+       qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
+                         clips, num_clips, inc);
+       return 0;
+}
+
+static const struct drm_framebuffer_funcs qxl_fb_funcs = {
+       .destroy = qxl_user_framebuffer_destroy,
+       .dirty = qxl_framebuffer_surface_dirty,
+/*     TODO?
+ *     .create_handle = qxl_user_framebuffer_create_handle, */
+};
+
+int
+qxl_framebuffer_init(struct drm_device *dev,
+                    struct qxl_framebuffer *qfb,
+                    struct drm_mode_fb_cmd2 *mode_cmd,
+                    struct drm_gem_object *obj)
+{
+       int ret;
+
+       qfb->obj = obj;
+       ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
+       if (ret) {
+               qfb->obj = NULL;
+               return ret;
+       }
+       drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd);
+       return 0;
+}
+
+static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 const struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct qxl_device *qdev = dev->dev_private;
+
+       qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n",
+                  __func__,
+                  mode->hdisplay, mode->vdisplay,
+                  adjusted_mode->hdisplay,
+                  adjusted_mode->vdisplay);
+       return true;
+}
+
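+/*
+ * Sanity check every head and then kick the device to re-read the monitors
+ * config referenced from the ram header.
+ */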
+void
+qxl_send_monitors_config(struct qxl_device *qdev)
+{
+       int i;
+
+       BUG_ON(!qdev->ram_header->monitors_config);
+
+       if (qdev->monitors_config->count == 0) {
+               qxl_io_log(qdev, "%s: 0 monitors??\n", __func__);
+               return;
+       }
+       for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
+               struct qxl_head *head = &qdev->monitors_config->heads[i];
+
+               if (head->y > 8192 || head->y < head->x ||
+                   head->width > 8192 || head->height > 8192) {
+                       DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
+                                 i, head->width, head->height,
+                                 head->x, head->y);
+                       return;
+               }
+       }
+       qxl_io_monitors_config(qdev);
+}
+
+static void qxl_monitors_config_set_single(struct qxl_device *qdev,
+                                          unsigned x, unsigned y,
+                                          unsigned width, unsigned height)
+{
+       DRM_DEBUG("%dx%d+%d+%d\n", width, height, x, y);
+       qdev->monitors_config->count = 1;
+       qdev->monitors_config->heads[0].x = x;
+       qdev->monitors_config->heads[0].y = y;
+       qdev->monitors_config->heads[0].width = width;
+       qdev->monitors_config->heads[0].height = height;
+}
+
+static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode,
+                              int x, int y,
+                              struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_mode *m = (void *)mode->private;
+       struct qxl_framebuffer *qfb;
+       struct qxl_bo *bo, *old_bo = NULL;
+       uint32_t width, height, base_offset;
+       bool recreate_primary = false;
+       int ret;
+
+       if (!crtc->fb) {
+               DRM_DEBUG_KMS("No FB bound\n");
+               return 0;
+       }
+
+       if (old_fb) {
+               qfb = to_qxl_framebuffer(old_fb);
+               old_bo = gem_to_qxl_bo(qfb->obj);
+       }
+       qfb = to_qxl_framebuffer(crtc->fb);
+       bo = gem_to_qxl_bo(qfb->obj);
+       if (!m)
+               /* and do we care? */
+               DRM_DEBUG("%dx%d: not a native mode\n", x, y);
+       else
+               DRM_DEBUG("%dx%d: qxl id %d\n",
+                         mode->hdisplay, mode->vdisplay, m->id);
+       DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
+                 x, y,
+                 mode->hdisplay, mode->vdisplay,
+                 adjusted_mode->hdisplay,
+                 adjusted_mode->vdisplay);
+
+       recreate_primary = true;
+
+       width = mode->hdisplay;
+       height = mode->vdisplay;
+       base_offset = 0;
+
+       ret = qxl_bo_reserve(bo, false);
+       if (ret != 0)
+               return ret;
+       ret = qxl_bo_pin(bo, bo->type, NULL);
+       if (ret != 0) {
+               qxl_bo_unreserve(bo);
+               return -EINVAL;
+       }
+       qxl_bo_unreserve(bo);
+       if (recreate_primary) {
+               qxl_io_destroy_primary(qdev);
+               qxl_io_log(qdev,
+                          "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
+                          width, height, bo->surf.width,
+                          bo->surf.height, bo->surf.stride, bo->surf.format);
+               qxl_io_create_primary(qdev, width, height, base_offset, bo);
+               bo->is_primary = true;
+       }
+
+       if (old_bo && old_bo != bo) {
+               old_bo->is_primary = false;
+               ret = qxl_bo_reserve(old_bo, false);
+               qxl_bo_unpin(old_bo);
+               qxl_bo_unreserve(old_bo);
+       }
+
+       if (qdev->monitors_config->count == 0) {
+               qxl_monitors_config_set_single(qdev, x, y,
+                                              mode->hdisplay,
+                                              mode->vdisplay);
+       }
+       qdev->mode_set = true;
+       return 0;
+}
+
+static void qxl_crtc_prepare(struct drm_crtc *crtc)
+{
+       DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
+                 crtc->mode.hdisplay, crtc->mode.vdisplay,
+                 crtc->x, crtc->y, crtc->enabled);
+}
+
+static void qxl_crtc_commit(struct drm_crtc *crtc)
+{
+       DRM_DEBUG("\n");
+}
+
+static void qxl_crtc_load_lut(struct drm_crtc *crtc)
+{
+       DRM_DEBUG("\n");
+}
+
+static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
+       .dpms = qxl_crtc_dpms,
+       .mode_fixup = qxl_crtc_mode_fixup,
+       .mode_set = qxl_crtc_mode_set,
+       .prepare = qxl_crtc_prepare,
+       .commit = qxl_crtc_commit,
+       .load_lut = qxl_crtc_load_lut,
+};
+
+static int qdev_crtc_init(struct drm_device *dev, int num_crtc)
+{
+       struct qxl_crtc *qxl_crtc;
+
+       qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
+       if (!qxl_crtc)
+               return -ENOMEM;
+
+       drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
+       drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
+       return 0;
+}
+
+static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
+{
+       DRM_DEBUG("\n");
+}
+
+static bool qxl_enc_mode_fixup(struct drm_encoder *encoder,
+                              const struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+       DRM_DEBUG("\n");
+       return true;
+}
+
+static void qxl_enc_prepare(struct drm_encoder *encoder)
+{
+       DRM_DEBUG("\n");
+}
+
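+/*
+ * Derive the head index from the encoder's possible_crtcs mask, update that
+ * head from the crtc's current mode and position, and push the resulting
+ * monitors config to the device.
+ */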
+static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
+               struct drm_encoder *encoder)
+{
+       int i;
+       struct qxl_head *head;
+       struct drm_display_mode *mode;
+
+       BUG_ON(!encoder);
+       /* TODO: ugly, do better */
+       for (i = 0 ; (encoder->possible_crtcs != (1 << i)) && i < 32; ++i)
+               ;
+       if (encoder->possible_crtcs != (1 << i)) {
+               DRM_ERROR("encoder has wrong possible_crtcs: %x\n",
+                         encoder->possible_crtcs);
+               return;
+       }
+       if (!qdev->monitors_config ||
+           qdev->monitors_config->max_allowed <= i) {
+               DRM_ERROR(
+               "head number too large or missing monitors config: %p, %d",
+               qdev->monitors_config,
+               qdev->monitors_config ?
+                       qdev->monitors_config->max_allowed : -1);
+               return;
+       }
+       if (!encoder->crtc) {
+               DRM_ERROR("missing crtc on encoder %p\n", encoder);
+               return;
+       }
+       if (i != 0)
+               DRM_DEBUG("missing for multiple monitors: no head holes\n");
+       head = &qdev->monitors_config->heads[i];
+       head->id = i;
+       head->surface_id = 0;
+       if (encoder->crtc->enabled) {
+               mode = &encoder->crtc->mode;
+               head->width = mode->hdisplay;
+               head->height = mode->vdisplay;
+               head->x = encoder->crtc->x;
+               head->y = encoder->crtc->y;
+               if (qdev->monitors_config->count < i + 1)
+                       qdev->monitors_config->count = i + 1;
+       } else {
+               head->width = 0;
+               head->height = 0;
+               head->x = 0;
+               head->y = 0;
+       }
+       DRM_DEBUG("setting head %d to +%d+%d %dx%d\n",
+                 i, head->x, head->y, head->width, head->height);
+       head->flags = 0;
+       /* TODO - somewhere else to call this for multiple monitors
+        * (config_commit?) */
+       qxl_send_monitors_config(qdev);
+}
+
+static void qxl_enc_commit(struct drm_encoder *encoder)
+{
+       struct qxl_device *qdev = encoder->dev->dev_private;
+
+       qxl_write_monitors_config_for_encoder(qdev, encoder);
+       DRM_DEBUG("\n");
+}
+
+static void qxl_enc_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       DRM_DEBUG("\n");
+}
+
+static int qxl_conn_get_modes(struct drm_connector *connector)
+{
+       int ret = 0;
+       struct qxl_device *qdev = connector->dev->dev_private;
+
+       DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
+       /* TODO: what should we do here? only show the configured modes for the
+        * device, or allow the full list, or both? */
+       if (qdev->monitors_config && qdev->monitors_config->count) {
+               ret = qxl_add_monitors_config_modes(connector);
+               if (ret < 0)
+                       return ret;
+       }
+       ret += qxl_add_common_modes(connector);
+       return ret;
+}
+
+static int qxl_conn_mode_valid(struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       /* TODO: is this called for user defined modes? (xrandr --add-mode)
+        * TODO: check that the mode fits in the framebuffer */
+       DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
+                 mode->vdisplay, mode->status);
+       return MODE_OK;
+}
+
+static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
+{
+       struct qxl_output *qxl_output =
+               drm_connector_to_qxl_output(connector);
+
+       DRM_DEBUG("\n");
+       return &qxl_output->enc;
+}
+
+
+static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
+       .dpms = qxl_enc_dpms,
+       .mode_fixup = qxl_enc_mode_fixup,
+       .prepare = qxl_enc_prepare,
+       .mode_set = qxl_enc_mode_set,
+       .commit = qxl_enc_commit,
+};
+
+static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
+       .get_modes = qxl_conn_get_modes,
+       .mode_valid = qxl_conn_mode_valid,
+       .best_encoder = qxl_best_encoder,
+};
+
+static void qxl_conn_save(struct drm_connector *connector)
+{
+       DRM_DEBUG("\n");
+}
+
+static void qxl_conn_restore(struct drm_connector *connector)
+{
+       DRM_DEBUG("\n");
+}
+
+static enum drm_connector_status qxl_conn_detect(
+                       struct drm_connector *connector,
+                       bool force)
+{
+       struct qxl_output *output =
+               drm_connector_to_qxl_output(connector);
+       struct drm_device *ddev = connector->dev;
+       struct qxl_device *qdev = ddev->dev_private;
+       int connected;
+
+       /* The first monitor is always connected */
+       connected = (output->index == 0) ||
+                   (qdev->monitors_config &&
+                    qdev->monitors_config->count > output->index);
+
+       DRM_DEBUG("\n");
+       return connected ? connector_status_connected
+                        : connector_status_disconnected;
+}
+
+static int qxl_conn_set_property(struct drm_connector *connector,
+                                  struct drm_property *property,
+                                  uint64_t value)
+{
+       DRM_DEBUG("\n");
+       return 0;
+}
+
+static void qxl_conn_destroy(struct drm_connector *connector)
+{
+       struct qxl_output *qxl_output =
+               drm_connector_to_qxl_output(connector);
+
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(qxl_output);
+}
+
+static const struct drm_connector_funcs qxl_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .save = qxl_conn_save,
+       .restore = qxl_conn_restore,
+       .detect = qxl_conn_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = qxl_conn_set_property,
+       .destroy = qxl_conn_destroy,
+};
+
+static void qxl_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs qxl_enc_funcs = {
+       .destroy = qxl_enc_destroy,
+};
+
+static int qdev_output_init(struct drm_device *dev, int num_output)
+{
+       struct qxl_output *qxl_output;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+
+       qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
+       if (!qxl_output)
+               return -ENOMEM;
+
+       qxl_output->index = num_output;
+
+       connector = &qxl_output->base;
+       encoder = &qxl_output->enc;
+       drm_connector_init(dev, &qxl_output->base,
+                          &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+
+       drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
+                        DRM_MODE_ENCODER_VIRTUAL);
+
+       encoder->possible_crtcs = 1 << num_output;
+       drm_mode_connector_attach_encoder(&qxl_output->base,
+                                         &qxl_output->enc);
+       drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
+       drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
+
+       drm_sysfs_connector_add(connector);
+       return 0;
+}
+
+static struct drm_framebuffer *
+qxl_user_framebuffer_create(struct drm_device *dev,
+                           struct drm_file *file_priv,
+                           struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct drm_gem_object *obj;
+       struct qxl_framebuffer *qxl_fb;
+       struct qxl_device *qdev = dev->dev_private;
+       int ret;
+
+       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+       if (!obj)
+               return NULL;
+
+       qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
+       if (qxl_fb == NULL) {
+               drm_gem_object_unreference_unlocked(obj);
+               return NULL;
+       }
+
+       ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
+       if (ret) {
+               kfree(qxl_fb);
+               drm_gem_object_unreference_unlocked(obj);
+               return NULL;
+       }
+
+       if (qdev->active_user_framebuffer) {
+               DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
+                        __func__,
+                        qdev->active_user_framebuffer, qxl_fb);
+       }
+       qdev->active_user_framebuffer = qxl_fb;
+
+       return &qxl_fb->base;
+}
+
+static const struct drm_mode_config_funcs qxl_mode_funcs = {
+       .fb_create = qxl_user_framebuffer_create,
+};
+
+int qxl_modeset_init(struct qxl_device *qdev)
+{
+       int i;
+       int ret;
+       struct drm_gem_object *gobj;
+       int max_allowed = QXL_NUM_OUTPUTS;
+       int monitors_config_size = sizeof(struct qxl_monitors_config) +
+                                  max_allowed * sizeof(struct qxl_head);
+
+       drm_mode_config_init(qdev->ddev);
+       ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
+                                   QXL_GEM_DOMAIN_VRAM,
+                                   false, false, NULL, &gobj);
+       if (ret) {
+               DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
+               return -ENOMEM;
+       }
+       qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
+       qxl_bo_kmap(qdev->monitors_config_bo, NULL);
+       qdev->monitors_config = qdev->monitors_config_bo->kptr;
+       qdev->ram_header->monitors_config =
+               qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
+
+       memset(qdev->monitors_config, 0, monitors_config_size);
+       qdev->monitors_config->max_allowed = max_allowed;
+
+       qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;
+
+       /* modes will be validated against the framebuffer size */
+       qdev->ddev->mode_config.min_width = 320;
+       qdev->ddev->mode_config.min_height = 200;
+       qdev->ddev->mode_config.max_width = 8192;
+       qdev->ddev->mode_config.max_height = 8192;
+
+       qdev->ddev->mode_config.fb_base = qdev->vram_base;
+       for (i = 0 ; i < QXL_NUM_OUTPUTS; ++i) {
+               qdev_crtc_init(qdev->ddev, i);
+               qdev_output_init(qdev->ddev, i);
+       }
+
+       qdev->mode_info.mode_config_initialized = true;
+
+       /* primary surface must be created by this point, to allow
+        * issuing command queue commands and having them read by
+        * spice server. */
+       qxl_fbdev_init(qdev);
+       return 0;
+}
+
+void qxl_modeset_fini(struct qxl_device *qdev)
+{
+       qxl_fbdev_fini(qdev);
+       if (qdev->mode_info.mode_config_initialized) {
+               drm_mode_config_cleanup(qdev->ddev);
+               qdev->mode_info.mode_config_initialized = false;
+       }
+}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
new file mode 100644 (file)
index 0000000..3c8c3db
--- /dev/null
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* returns a pointer to the already allocated qxl_rect array inside
+ * the qxl_clip_rects. This is *not* the same as the memory allocated
+ * on the device, it is offset to qxl_clip_rects.chunk.data */
+static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
+                                             struct qxl_drawable *drawable,
+                                             unsigned num_clips,
+                                             struct qxl_bo **clips_bo,
+                                             struct qxl_release *release)
+{
+       struct qxl_clip_rects *dev_clips;
+       int ret;
+       int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
+       ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
+       if (ret)
+               return NULL;
+
+       ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
+       if (ret) {
+               qxl_bo_unref(clips_bo);
+               return NULL;
+       }
+       dev_clips->num_rects = num_clips;
+       dev_clips->chunk.next_chunk = 0;
+       dev_clips->chunk.prev_chunk = 0;
+       dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
+       return (struct qxl_rect *)dev_clips->chunk.data;
+}
+
+static int
+make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
+             const struct qxl_rect *rect,
+             struct qxl_release **release)
+{
+       struct qxl_drawable *drawable;
+       int i, ret;
+
+       ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
+                                        QXL_RELEASE_DRAWABLE, release,
+                                        NULL);
+       if (ret)
+               return ret;
+
+       drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
+       drawable->type = type;
+
+       drawable->surface_id = surface;         /* Only primary for now */
+       drawable->effect = QXL_EFFECT_OPAQUE;
+       drawable->self_bitmap = 0;
+       drawable->self_bitmap_area.top = 0;
+       drawable->self_bitmap_area.left = 0;
+       drawable->self_bitmap_area.bottom = 0;
+       drawable->self_bitmap_area.right = 0;
+       /* FIXME: add clipping */
+       drawable->clip.type = SPICE_CLIP_TYPE_NONE;
+
+       /*
+        * surfaces_dest[i] should apparently be filled out with the
+        * surfaces that we depend on, and surface_rects should be
+        * filled with the rectangles of those surfaces that we
+        * are going to use.
+        */
+       for (i = 0; i < 3; ++i)
+               drawable->surfaces_dest[i] = -1;
+
+       if (rect)
+               drawable->bbox = *rect;
+
+       drawable->mm_time = qdev->rom->mm_clock;
+       qxl_release_unmap(qdev, *release, &drawable->release_info);
+       return 0;
+}
+
+static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
+                                  const struct qxl_fb_image *qxl_fb_image)
+{
+       struct qxl_device *qdev = qxl_fb_image->qdev;
+       const struct fb_image *fb_image = &qxl_fb_image->fb_image;
+       uint32_t visual = qxl_fb_image->visual;
+       const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
+       struct qxl_palette *pal;
+       int ret;
+       uint32_t fgcolor, bgcolor;
+       static uint64_t unique; /* we make no attempt to actually set this
+                                * correctly globally, since that would require
+                                * tracking all of our palettes. */
+
+       ret = qxl_alloc_bo_reserved(qdev,
+                                   sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
+                                   palette_bo);
+
+       ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
+       pal->num_ents = 2;
+       pal->unique = unique++;
+       if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
+               /* NB: this is the only used branch currently. */
+               fgcolor = pseudo_palette[fb_image->fg_color];
+               bgcolor = pseudo_palette[fb_image->bg_color];
+       } else {
+               fgcolor = fb_image->fg_color;
+               bgcolor = fb_image->bg_color;
+       }
+       pal->ents[0] = bgcolor;
+       pal->ents[1] = fgcolor;
+       qxl_bo_kunmap(*palette_bo);
+       return 0;
+}
+
+void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
+                       int stride /* filled in if 0 */)
+{
+       struct qxl_device *qdev = qxl_fb_image->qdev;
+       struct qxl_drawable *drawable;
+       struct qxl_rect rect;
+       const struct fb_image *fb_image = &qxl_fb_image->fb_image;
+       int x = fb_image->dx;
+       int y = fb_image->dy;
+       int width = fb_image->width;
+       int height = fb_image->height;
+       const char *src = fb_image->data;
+       int depth = fb_image->depth;
+       struct qxl_release *release;
+       struct qxl_bo *image_bo;
+       struct qxl_image *image;
+       int ret;
+
+       if (stride == 0)
+               stride = depth * width / 8;
+
+       rect.left = x;
+       rect.right = x + width;
+       rect.top = y;
+       rect.bottom = y + height;
+
+       ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
+       if (ret)
+               return;
+
+       ret = qxl_image_create(qdev, release, &image_bo,
+                              (const uint8_t *)src, 0, 0,
+                              width, height, depth, stride);
+       if (ret) {
+               qxl_release_unreserve(qdev, release);
+               qxl_release_free(qdev, release);
+               return;
+       }
+
+       if (depth == 1) {
+               struct qxl_bo *palette_bo;
+               void *ptr;
+               ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
+               qxl_release_add_res(qdev, release, palette_bo);
+
+               ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
+               image = ptr;
+               image->u.bitmap.palette =
+                       qxl_bo_physical_address(qdev, palette_bo, 0);
+               qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+               qxl_bo_unreserve(palette_bo);
+               qxl_bo_unref(&palette_bo);
+       }
+
+       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+       drawable->u.copy.src_area.top = 0;
+       drawable->u.copy.src_area.bottom = height;
+       drawable->u.copy.src_area.left = 0;
+       drawable->u.copy.src_area.right = width;
+
+       drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+       drawable->u.copy.scale_mode = 0;
+       drawable->u.copy.mask.flags = 0;
+       drawable->u.copy.mask.pos.x = 0;
+       drawable->u.copy.mask.pos.y = 0;
+       drawable->u.copy.mask.bitmap = 0;
+
+       drawable->u.copy.src_bitmap =
+               qxl_bo_physical_address(qdev, image_bo, 0);
+       qxl_release_unmap(qdev, release, &drawable->release_info);
+
+       qxl_release_add_res(qdev, release, image_bo);
+       qxl_bo_unreserve(image_bo);
+       qxl_bo_unref(&image_bo);
+
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+       qxl_release_unreserve(qdev, release);
+}
+
+/* push a draw command using the given clipping rectangles as
+ * the sources from the shadow framebuffer.
+ *
+ * Right now implementing with a single draw and a clip list. Clip
+ * lists are known to be a problem performance wise, this can be solved
+ * by treating them differently in the server.
+ */
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+                      struct qxl_framebuffer *qxl_fb,
+                      struct qxl_bo *bo,
+                      unsigned flags, unsigned color,
+                      struct drm_clip_rect *clips,
+                      unsigned num_clips, int inc)
+{
+       /*
+        * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
+        * send a fill command instead, much cheaper.
+        *
+        * See include/drm/drm_mode.h
+        */
+       struct drm_clip_rect *clips_ptr;
+       int i;
+       int left, right, top, bottom;
+       int width, height;
+       struct qxl_drawable *drawable;
+       struct qxl_rect drawable_rect;
+       struct qxl_rect *rects;
+       int stride = qxl_fb->base.pitches[0];
+       /* depth is not actually interesting, we don't mask with it */
+       int depth = qxl_fb->base.bits_per_pixel;
+       uint8_t *surface_base;
+       struct qxl_release *release;
+       struct qxl_bo *image_bo;
+       struct qxl_bo *clips_bo;
+       int ret;
+
+       left = clips->x1;
+       right = clips->x2;
+       top = clips->y1;
+       bottom = clips->y2;
+
+       /* skip the first clip rect */
+       for (i = 1, clips_ptr = clips + inc;
+            i < num_clips; i++, clips_ptr += inc) {
+               left = min_t(int, left, (int)clips_ptr->x1);
+               right = max_t(int, right, (int)clips_ptr->x2);
+               top = min_t(int, top, (int)clips_ptr->y1);
+               bottom = max_t(int, bottom, (int)clips_ptr->y2);
+       }
+
+       width = right - left;
+       height = bottom - top;
+       drawable_rect.left = left;
+       drawable_rect.right = right;
+       drawable_rect.top = top;
+       drawable_rect.bottom = bottom;
+       ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
+                           &release);
+       if (ret)
+               return;
+
+       ret = qxl_bo_kmap(bo, (void **)&surface_base);
+       if (ret)
+               goto out_unref;
+
+       ret = qxl_image_create(qdev, release, &image_bo, surface_base,
+                              left, top, width, height, depth, stride);
+       qxl_bo_kunmap(bo);
+       if (ret)
+               goto out_unref;
+
+       rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
+       if (!rects) {
+               qxl_bo_unref(&image_bo);
+               goto out_unref;
+       }
+       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+       drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
+       drawable->clip.data = qxl_bo_physical_address(qdev,
+                                                     clips_bo, 0);
+       qxl_release_add_res(qdev, release, clips_bo);
+
+       drawable->u.copy.src_area.top = 0;
+       drawable->u.copy.src_area.bottom = height;
+       drawable->u.copy.src_area.left = 0;
+       drawable->u.copy.src_area.right = width;
+
+       drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+       drawable->u.copy.scale_mode = 0;
+       drawable->u.copy.mask.flags = 0;
+       drawable->u.copy.mask.pos.x = 0;
+       drawable->u.copy.mask.pos.y = 0;
+       drawable->u.copy.mask.bitmap = 0;
+
+       drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
+       qxl_release_unmap(qdev, release, &drawable->release_info);
+       qxl_release_add_res(qdev, release, image_bo);
+       qxl_bo_unreserve(image_bo);
+       qxl_bo_unref(&image_bo);
+       clips_ptr = clips;
+       for (i = 0; i < num_clips; i++, clips_ptr += inc) {
+               rects[i].left   = clips_ptr->x1;
+               rects[i].right  = clips_ptr->x2;
+               rects[i].top    = clips_ptr->y1;
+               rects[i].bottom = clips_ptr->y2;
+       }
+       qxl_bo_kunmap(clips_bo);
+       qxl_bo_unreserve(clips_bo);
+       qxl_bo_unref(&clips_bo);
+
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+       qxl_release_unreserve(qdev, release);
+       return;
+
+out_unref:
+       qxl_release_unreserve(qdev, release);
+       qxl_release_free(qdev, release);
+}
+
+void qxl_draw_copyarea(struct qxl_device *qdev,
+                      u32 width, u32 height,
+                      u32 sx, u32 sy,
+                      u32 dx, u32 dy)
+{
+       struct qxl_drawable *drawable;
+       struct qxl_rect rect;
+       struct qxl_release *release;
+       int ret;
+
+       rect.left = dx;
+       rect.top = dy;
+       rect.right = dx + width;
+       rect.bottom = dy + height;
+       ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
+       if (ret)
+               return;
+
+       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+       drawable->u.copy_bits.src_pos.x = sx;
+       drawable->u.copy_bits.src_pos.y = sy;
+
+       qxl_release_unmap(qdev, release, &drawable->release_info);
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+       qxl_release_unreserve(qdev, release);
+}
+
+void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
+{
+       struct qxl_device *qdev = qxl_draw_fill_rec->qdev;
+       struct qxl_rect rect = qxl_draw_fill_rec->rect;
+       uint32_t color = qxl_draw_fill_rec->color;
+       uint16_t rop = qxl_draw_fill_rec->rop;
+       struct qxl_drawable *drawable;
+       struct qxl_release *release;
+       int ret;
+
+       ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
+       if (ret)
+               return;
+
+       drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+       drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
+       drawable->u.fill.brush.u.color = color;
+       drawable->u.fill.rop_descriptor = rop;
+       drawable->u.fill.mask.flags = 0;
+       drawable->u.fill.mask.pos.x = 0;
+       drawable->u.fill.mask.pos.y = 0;
+       drawable->u.fill.mask.bitmap = 0;
+
+       qxl_release_unmap(qdev, release, &drawable->release_info);
+       qxl_fence_releaseable(qdev, release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+       qxl_release_unreserve(qdev, release);
+}
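
For reference, a minimal sketch (not part of the patch) of the bounding-box reduction that qxl_draw_dirty_fb() applies to its clip list before the per-rect entries are copied into the array returned by drawable_set_clipping(); the helper name qxl_example_bbox is hypothetical.

static void qxl_example_bbox(const struct drm_clip_rect *clips,
                             unsigned num_clips, int inc,
                             struct qxl_rect *bbox)
{
        const struct drm_clip_rect *c = clips;
        unsigned i;

        /* seed with the first clip rect, then grow to cover the rest */
        bbox->left = c->x1;
        bbox->right = c->x2;
        bbox->top = c->y1;
        bbox->bottom = c->y2;
        for (i = 1, c = clips + inc; i < num_clips; i++, c += inc) {
                bbox->left = min_t(int, bbox->left, (int)c->x1);
                bbox->right = max_t(int, bbox->right, (int)c->x2);
                bbox->top = min_t(int, bbox->top, (int)c->y1);
                bbox->bottom = max_t(int, bbox->bottom, (int)c->y2);
        }
}

The reduced box becomes the drawable's bbox, while the individual rects are written unreduced into the SPICE_CLIP_TYPE_RECTS list attached to the command.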
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
new file mode 100644 (file)
index 0000000..aa291d8
--- /dev/null
@@ -0,0 +1,145 @@
+/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
+/* qxl_drv.c -- QXL driver -*- linux-c -*-
+ *
+ * Copyright 2011 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Airlie <airlie@redhat.com>
+ *    Alon Levy <alevy@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm/drm.h"
+
+#include "qxl_drv.h"
+
+extern int qxl_max_ioctls;
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+       { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
+         0xffff00, 0 },
+       { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
+         0xffff00, 0 },
+       { 0, 0, 0 },
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int qxl_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, qxl_modeset, int, 0400);
+
+static struct drm_driver qxl_driver;
+static struct pci_driver qxl_pci_driver;
+
+static int
+qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       if (pdev->revision < 4) {
+               DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
+                         " use xf86-video-qxl in user mode");
+               return -EINVAL; /* TODO: ENODEV ? */
+       }
+       return drm_get_pci_dev(pdev, ent, &qxl_driver);
+}
+
+static void
+qxl_pci_remove(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       drm_put_dev(dev);
+}
+
+static struct pci_driver qxl_pci_driver = {
+        .name = DRIVER_NAME,
+        .id_table = pciidlist,
+        .probe = qxl_pci_probe,
+        .remove = qxl_pci_remove,
+};
+
+static const struct file_operations qxl_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .mmap = qxl_mmap,
+};
+
+static struct drm_driver qxl_driver = {
+       .driver_features = DRIVER_GEM | DRIVER_MODESET |
+                          DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+       .dev_priv_size = 0,
+       .load = qxl_driver_load,
+       .unload = qxl_driver_unload,
+
+       .dumb_create = qxl_mode_dumb_create,
+       .dumb_map_offset = qxl_mode_dumb_mmap,
+       .dumb_destroy = qxl_mode_dumb_destroy,
+#if defined(CONFIG_DEBUG_FS)
+       .debugfs_init = qxl_debugfs_init,
+       .debugfs_cleanup = qxl_debugfs_takedown,
+#endif
+       .gem_init_object = qxl_gem_object_init,
+       .gem_free_object = qxl_gem_object_free,
+       .gem_open_object = qxl_gem_object_open,
+       .gem_close_object = qxl_gem_object_close,
+       .fops = &qxl_fops,
+       .ioctls = qxl_ioctls,
+       .irq_handler = qxl_irq_handler,
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = 0,
+       .minor = 1,
+       .patchlevel = 0,
+};
+
+static int __init qxl_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+       if (vgacon_text_force() && qxl_modeset == -1)
+               return -EINVAL;
+#endif
+
+       if (qxl_modeset == 0)
+               return -EINVAL;
+       qxl_driver.num_ioctls = qxl_max_ioctls;
+       return drm_pci_init(&qxl_driver, &qxl_pci_driver);
+}
+
+static void __exit qxl_exit(void)
+{
+       drm_pci_exit(&qxl_driver, &qxl_pci_driver);
+}
+
+module_init(qxl_init);
+module_exit(qxl_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
new file mode 100644 (file)
index 0000000..52b582c
--- /dev/null
@@ -0,0 +1,566 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+
+#ifndef QXL_DRV_H
+#define QXL_DRV_H
+
+/*
+ * Definitions taken from spice-protocol, plus kernel driver specific bits.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
+#include <drm/qxl_drm.h>
+#include "qxl_dev.h"
+
+#define DRIVER_AUTHOR          "Dave Airlie"
+
+#define DRIVER_NAME            "qxl"
+#define DRIVER_DESC            "RH QXL"
+#define DRIVER_DATE            "20120117"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define QXL_NUM_OUTPUTS 1
+
+#define QXL_DEBUGFS_MAX_COMPONENTS             32
+
+extern int qxl_log_level;
+
+enum {
+       QXL_INFO_LEVEL = 1,
+       QXL_DEBUG_LEVEL = 2,
+};
+
+#define QXL_INFO(qdev, fmt, ...) do { \
+               if (qxl_log_level >= QXL_INFO_LEVEL) {  \
+                       qxl_io_log(qdev, fmt, __VA_ARGS__); \
+               }       \
+       } while (0)
+#define QXL_DEBUG(qdev, fmt, ...) do { \
+               if (qxl_log_level >= QXL_DEBUG_LEVEL) { \
+                       qxl_io_log(qdev, fmt, __VA_ARGS__); \
+               }       \
+       } while (0)
+#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
+               static int done;                \
+               if (!done) {                    \
+                       done = 1;                       \
+                       QXL_INFO(qdev, fmt, __VA_ARGS__);       \
+               }                                               \
+       } while (0)
+
+#define DRM_FILE_OFFSET 0x100000000ULL
+#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
+
+#define QXL_INTERRUPT_MASK (\
+       QXL_INTERRUPT_DISPLAY |\
+       QXL_INTERRUPT_CURSOR |\
+       QXL_INTERRUPT_IO_CMD |\
+       QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
+
+struct qxl_fence {
+       struct qxl_device *qdev;
+       uint32_t num_active_releases;
+       uint32_t *release_ids;
+       struct radix_tree_root tree;
+};
+
+struct qxl_bo {
+       /* Protected by gem.mutex */
+       struct list_head                list;
+       /* Protected by tbo.reserved */
+       u32                             placements[3];
+       struct ttm_placement            placement;
+       struct ttm_buffer_object        tbo;
+       struct ttm_bo_kmap_obj          kmap;
+       unsigned                        pin_count;
+       void                            *kptr;
+       int                             type;
+       /* Constant after initialization */
+       struct drm_gem_object           gem_base;
+       bool is_primary; /* is this now a primary surface */
+       bool hw_surf_alloc;
+       struct qxl_surface surf;
+       uint32_t surface_id;
+       struct qxl_fence fence; /* per bo fence  - list of releases */
+       struct qxl_release *surf_create;
+       atomic_t reserve_count;
+};
+#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+
+struct qxl_gem {
+       struct mutex            mutex;
+       struct list_head        objects;
+};
+
+struct qxl_bo_list {
+       struct list_head lhead;
+       struct qxl_bo *bo;
+};
+
+struct qxl_reloc_list {
+       struct list_head bos;
+};
+
+struct qxl_crtc {
+       struct drm_crtc base;
+       int cur_x;
+       int cur_y;
+};
+
+struct qxl_output {
+       int index;
+       struct drm_connector base;
+       struct drm_encoder enc;
+};
+
+struct qxl_framebuffer {
+       struct drm_framebuffer base;
+       struct drm_gem_object *obj;
+};
+
+#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
+#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
+
+struct qxl_mman {
+       struct ttm_bo_global_ref        bo_global_ref;
+       struct drm_global_reference     mem_global_ref;
+       bool                            mem_global_referenced;
+       struct ttm_bo_device            bdev;
+};
+
+struct qxl_mode_info {
+       int num_modes;
+       struct qxl_mode *modes;
+       bool mode_config_initialized;
+
+       /* pointer to fbdev info structure */
+       struct qxl_fbdev *qfbdev;
+};
+
+
+struct qxl_memslot {
+       uint8_t         generation;
+       uint64_t        start_phys_addr;
+       uint64_t        end_phys_addr;
+       uint64_t        high_bits;
+};
+
+enum {
+       QXL_RELEASE_DRAWABLE,
+       QXL_RELEASE_SURFACE_CMD,
+       QXL_RELEASE_CURSOR_CMD,
+};
+
+/* kernel-side release bookkeeping; not to be confused with the
+ * qxl_release_info structure in spice-protocol/qxl_dev.h */
+#define QXL_MAX_RES 96
+struct qxl_release {
+       int id;
+       int type;
+       int bo_count;
+       uint32_t release_offset;
+       uint32_t surface_release_id;
+       struct qxl_bo *bos[QXL_MAX_RES];
+};
+
+struct qxl_fb_image {
+       struct qxl_device *qdev;
+       uint32_t pseudo_palette[16];
+       struct fb_image fb_image;
+       uint32_t visual;
+};
+
+struct qxl_draw_fill {
+       struct qxl_device *qdev;
+       struct qxl_rect rect;
+       uint32_t color;
+       uint16_t rop;
+};
+
+/*
+ * Debugfs
+ */
+struct qxl_debugfs {
+       struct drm_info_list    *files;
+       unsigned                num_files;
+};
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+                            struct drm_info_list *files,
+                            unsigned nfiles);
+int qxl_debugfs_fence_init(struct qxl_device *qdev);
+void qxl_debugfs_remove_files(struct qxl_device *qdev);
+
+struct qxl_device;
+
+struct qxl_device {
+       struct device                   *dev;
+       struct drm_device               *ddev;
+       struct pci_dev                  *pdev;
+       unsigned long flags;
+
+       resource_size_t vram_base, vram_size;
+       resource_size_t surfaceram_base, surfaceram_size;
+       resource_size_t rom_base, rom_size;
+       struct qxl_rom *rom;
+
+       struct qxl_mode *modes;
+       struct qxl_bo *monitors_config_bo;
+       struct qxl_monitors_config *monitors_config;
+
+       /* last received client_monitors_config */
+       struct qxl_monitors_config *client_monitors_config;
+
+       int io_base;
+       void *ram;
+       struct qxl_mman         mman;
+       struct qxl_gem          gem;
+       struct qxl_mode_info mode_info;
+
+       /*
+        * last created framebuffer with fb_create
+        * only used by debugfs dumbppm
+        */
+       struct qxl_framebuffer *active_user_framebuffer;
+
+       struct fb_info                  *fbdev_info;
+       struct qxl_framebuffer  *fbdev_qfb;
+       void *ram_physical;
+
+       struct qxl_ring *release_ring;
+       struct qxl_ring *command_ring;
+       struct qxl_ring *cursor_ring;
+
+       struct qxl_ram_header *ram_header;
+       bool mode_set;
+
+       bool primary_created;
+
+       struct qxl_memslot      *mem_slots;
+       uint8_t         n_mem_slots;
+
+       uint8_t         main_mem_slot;
+       uint8_t         surfaces_mem_slot;
+       uint8_t         slot_id_bits;
+       uint8_t         slot_gen_bits;
+       uint64_t        va_slot_mask;
+
+       struct idr      release_idr;
+       spinlock_t release_idr_lock;
+       struct mutex    async_io_mutex;
+       unsigned int last_sent_io_cmd;
+
+       /* interrupt handling */
+       atomic_t irq_received;
+       atomic_t irq_received_display;
+       atomic_t irq_received_cursor;
+       atomic_t irq_received_io_cmd;
+       unsigned irq_received_error;
+       wait_queue_head_t display_event;
+       wait_queue_head_t cursor_event;
+       wait_queue_head_t io_cmd_event;
+       struct work_struct client_monitors_config_work;
+
+       /* debugfs */
+       struct qxl_debugfs      debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
+       unsigned                debugfs_count;
+
+       struct mutex            update_area_mutex;
+
+       struct idr      surf_id_idr;
+       spinlock_t surf_id_idr_lock;
+       int last_alloced_surf_id;
+
+       struct mutex surf_evict_mutex;
+       struct io_mapping *vram_mapping;
+       struct io_mapping *surface_mapping;
+
+       /* */
+       struct mutex release_mutex;
+       struct qxl_bo *current_release_bo[3];
+       int current_release_bo_offset[3];
+
+       struct workqueue_struct *gc_queue;
+       struct work_struct gc_work;
+
+};
+
+/* forward declaration for the QXL_INFO/QXL_DEBUG logging macros */
+void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
+
+extern struct drm_ioctl_desc qxl_ioctls[];
+extern int qxl_max_ioctls;
+
+int qxl_driver_load(struct drm_device *dev, unsigned long flags);
+int qxl_driver_unload(struct drm_device *dev);
+
+int qxl_modeset_init(struct qxl_device *qdev);
+void qxl_modeset_fini(struct qxl_device *qdev);
+
+int qxl_bo_init(struct qxl_device *qdev);
+void qxl_bo_fini(struct qxl_device *qdev);
+
+struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
+                                int element_size,
+                                int n_elements,
+                                int prod_notify,
+                                bool set_prod_notify,
+                                wait_queue_head_t *push_event);
+void qxl_ring_free(struct qxl_ring *ring);
+
+static inline void *
+qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
+{
+       QXL_INFO(qdev, "not implemented (%lu)\n", physical);
+       return NULL;
+}
+
+static inline uint64_t
+qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
+                       unsigned long offset)
+{
+       int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
+       struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+
+       /* TODO - need to hold one of the locks to read tbo.offset */
+       return slot->high_bits | (bo->tbo.offset + offset);
+}
+
+/* qxl_fb.c */
+#define QXLFB_CONN_LIMIT 1
+
+int qxl_fbdev_init(struct qxl_device *qdev);
+void qxl_fbdev_fini(struct qxl_device *qdev);
+int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
+                                 struct drm_file *file_priv,
+                                 uint32_t *handle);
+
+/* qxl_display.c */
+int
+qxl_framebuffer_init(struct drm_device *dev,
+                    struct qxl_framebuffer *rfb,
+                    struct drm_mode_fb_cmd2 *mode_cmd,
+                    struct drm_gem_object *obj);
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
+void qxl_send_monitors_config(struct qxl_device *qdev);
+
+/* used by qxl_debugfs only */
+void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
+void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
+
+/* qxl_gem.c */
+int qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_fini(struct qxl_device *qdev);
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+                         int alignment, int initial_domain,
+                         bool discardable, bool kernel,
+                         struct qxl_surface *surf,
+                         struct drm_gem_object **obj);
+int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+                         uint64_t *gpu_addr);
+void qxl_gem_object_unpin(struct drm_gem_object *obj);
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+                                     struct drm_file *file_priv,
+                                     u32 domain,
+                                     size_t size,
+                                     struct qxl_surface *surf,
+                                     struct qxl_bo **qobj,
+                                     uint32_t *handle);
+int qxl_gem_object_init(struct drm_gem_object *obj);
+void qxl_gem_object_free(struct drm_gem_object *gobj);
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void qxl_gem_object_close(struct drm_gem_object *obj,
+                         struct drm_file *file_priv);
+void qxl_bo_force_delete(struct qxl_device *qdev);
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+
+/* qxl_dumb.c */
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+                        struct drm_device *dev,
+                        struct drm_mode_create_dumb *args);
+int qxl_mode_dumb_destroy(struct drm_file *file_priv,
+                         struct drm_device *dev,
+                         uint32_t handle);
+int qxl_mode_dumb_mmap(struct drm_file *filp,
+                      struct drm_device *dev,
+                      uint32_t handle, uint64_t *offset_p);
+
+
+/* qxl ttm */
+int qxl_ttm_init(struct qxl_device *qdev);
+void qxl_ttm_fini(struct qxl_device *qdev);
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* qxl image */
+
+int qxl_image_create(struct qxl_device *qdev,
+                    struct qxl_release *release,
+                    struct qxl_bo **image_bo,
+                    const uint8_t *data,
+                    int x, int y, int width, int height,
+                    int depth, int stride);
+void qxl_update_screen(struct qxl_device *qxl);
+
+/* qxl io operations (qxl_cmd.c) */
+
+void qxl_io_create_primary(struct qxl_device *qdev,
+                          unsigned width, unsigned height, unsigned offset,
+                          struct qxl_bo *bo);
+void qxl_io_destroy_primary(struct qxl_device *qdev);
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
+void qxl_io_notify_oom(struct qxl_device *qdev);
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+                      const struct qxl_rect *area);
+
+void qxl_io_reset(struct qxl_device *qdev);
+void qxl_io_monitors_config(struct qxl_device *qdev);
+int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
+void qxl_io_flush_release(struct qxl_device *qdev);
+void qxl_io_flush_surfaces(struct qxl_device *qdev);
+
+int qxl_release_reserve(struct qxl_device *qdev,
+                       struct qxl_release *release, bool no_wait);
+void qxl_release_unreserve(struct qxl_device *qdev,
+                          struct qxl_release *release);
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+                                       struct qxl_release *release);
+void qxl_release_unmap(struct qxl_device *qdev,
+                      struct qxl_release *release,
+                      union qxl_release_info *info);
+/*
+ * qxl_bo_add_resource.
+ *
+ */
+void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+                                      enum qxl_surface_cmd_type surface_cmd_type,
+                                      struct qxl_release *create_rel,
+                                      struct qxl_release **release);
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+                              int type, struct qxl_release **release,
+                              struct qxl_bo **rbo);
+int qxl_fence_releaseable(struct qxl_device *qdev,
+                         struct qxl_release *release);
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+                             uint32_t type, bool interruptible);
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+                            uint32_t type, bool interruptible);
+int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+                         struct qxl_bo **_bo);
+/* qxl drawing commands */
+
+void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
+                       int stride /* filled in if 0 */);
+
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+                      struct qxl_framebuffer *qxl_fb,
+                      struct qxl_bo *bo,
+                      unsigned flags, unsigned color,
+                      struct drm_clip_rect *clips,
+                      unsigned num_clips, int inc);
+
+void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
+
+void qxl_draw_copyarea(struct qxl_device *qdev,
+                      u32 width, u32 height,
+                      u32 sx, u32 sy,
+                      u32 dx, u32 dy);
+
+uint64_t
+qxl_release_alloc(struct qxl_device *qdev, int type,
+                 struct qxl_release **ret);
+
+void qxl_release_free(struct qxl_device *qdev,
+                     struct qxl_release *release);
+void qxl_release_add_res(struct qxl_device *qdev,
+                        struct qxl_release *release,
+                        struct qxl_bo *bo);
+/* used by qxl_debugfs_release */
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+                                                  uint64_t id);
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush);
+int qxl_garbage_collect(struct qxl_device *qdev);
+
+/* debugfs */
+
+int qxl_debugfs_init(struct drm_minor *minor);
+void qxl_debugfs_takedown(struct drm_minor *minor);
+
+/* qxl_irq.c */
+int qxl_irq_init(struct qxl_device *qdev);
+irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
+
+/* qxl_fb.c */
+int qxl_fb_init(struct qxl_device *qdev);
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+                         struct drm_info_list *files,
+                         unsigned nfiles);
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+                        struct qxl_bo *surf);
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+                           uint32_t surface_id);
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+                        struct qxl_bo *surf,
+                        struct ttm_mem_reg *mem);
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+                          struct qxl_bo *surf);
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
+
+struct qxl_drv_surface *
+qxl_surface_lookup(struct drm_device *dev, int surface_id);
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
+int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
+
+/* qxl_fence.c */
+int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
+int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
+void qxl_fence_fini(struct qxl_fence *qfence);
+
+#endif
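
As an illustration of how the drawing code in this series uses the release API declared above, here is a minimal sketch (error handling elided and drawable field setup abbreviated; qxl_example_submit is a hypothetical name, not part of the patch):

static void qxl_example_submit(struct qxl_device *qdev,
                               const struct qxl_rect *rect)
{
        struct qxl_release *release;
        struct qxl_drawable *drawable;

        /* allocate a drawable plus its release tracking entry, reserved */
        if (qxl_alloc_release_reserved(qdev, sizeof(*drawable),
                                       QXL_RELEASE_DRAWABLE, &release, NULL))
                return;

        /* map, fill in the command, unmap */
        drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
        drawable->type = QXL_DRAW_FILL;
        drawable->bbox = *rect;
        /* ... remaining drawable fields as in make_drawable() ... */
        qxl_release_unmap(qdev, release, &drawable->release_info);

        /* fence the release and hand the command to the device */
        qxl_fence_releaseable(qdev, release);
        qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
        qxl_release_unreserve(qdev, release);
}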
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
new file mode 100644 (file)
index 0000000..847c4ee
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* dumb ioctls implementation */
+
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+                           struct drm_device *dev,
+                           struct drm_mode_create_dumb *args)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct qxl_bo *qobj;
+       uint32_t handle;
+       int r;
+       struct qxl_surface surf;
+       uint32_t pitch, format;
+       pitch = args->width * ((args->bpp + 1) / 8);
+       args->size = pitch * args->height;
+       args->size = ALIGN(args->size, PAGE_SIZE);
+
+       switch (args->bpp) {
+       case 16:
+               format = SPICE_SURFACE_FMT_16_565;
+               break;
+       case 32:
+               format = SPICE_SURFACE_FMT_32_xRGB;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       surf.width = args->width;
+       surf.height = args->height;
+       surf.stride = pitch;
+       surf.format = format;
+       r = qxl_gem_object_create_with_handle(qdev, file_priv,
+                                             QXL_GEM_DOMAIN_VRAM,
+                                             args->size, &surf, &qobj,
+                                             &handle);
+       if (r)
+               return r;
+       args->pitch = pitch;
+       args->handle = handle;
+       return 0;
+}
+
+int qxl_mode_dumb_destroy(struct drm_file *file_priv,
+                            struct drm_device *dev,
+                            uint32_t handle)
+{
+       return drm_gem_handle_delete(file_priv, handle);
+}
+
+int qxl_mode_dumb_mmap(struct drm_file *file_priv,
+                      struct drm_device *dev,
+                      uint32_t handle, uint64_t *offset_p)
+{
+       struct drm_gem_object *gobj;
+       struct qxl_bo *qobj;
+
+       BUG_ON(!offset_p);
+       gobj = drm_gem_object_lookup(dev, file_priv, handle);
+       if (gobj == NULL)
+               return -ENOENT;
+       qobj = gem_to_qxl_bo(gobj);
+       *offset_p = qxl_bo_mmap_offset(qobj);
+       drm_gem_object_unreference_unlocked(gobj);
+       return 0;
+}
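
A worked example of the size math qxl_mode_dumb_create() performs above (numbers assumed for illustration, with PAGE_SIZE taken as 4096):

/* For a 1024x768, 32 bpp dumb buffer:
 *     pitch = 1024 * ((32 + 1) / 8)        = 1024 * 4 = 4096 bytes
 *     size  = ALIGN(4096 * 768, PAGE_SIZE) = 3145728 bytes (already aligned)
 * The surface is described to the device as SPICE_SURFACE_FMT_32_xRGB;
 * 16 bpp maps to SPICE_SURFACE_FMT_16_565, and any other depth is
 * rejected with -EINVAL.
 */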
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
new file mode 100644 (file)
index 0000000..b3c5127
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+ * Copyright © 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+#include <linux/module.h>
+#include <linux/fb.h>
+
+#include "drmP.h"
+#include "drm/drm.h"
+#include "drm/drm_crtc.h"
+#include "drm/drm_crtc_helper.h"
+#include "qxl_drv.h"
+
+#include "qxl_object.h"
+#include "drm_fb_helper.h"
+
+#define QXL_DIRTY_DELAY (HZ / 30)
+
+struct qxl_fbdev {
+       struct drm_fb_helper helper;
+       struct qxl_framebuffer  qfb;
+       struct list_head        fbdev_list;
+       struct qxl_device       *qdev;
+
+       void *shadow;
+       int size;
+
+       /* dirty memory logging */
+       struct {
+               spinlock_t lock;
+               bool active;
+               unsigned x1;
+               unsigned y1;
+               unsigned x2;
+               unsigned y2;
+       } dirty;
+};
+
+static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
+                             struct qxl_device *qdev, struct fb_info *info,
+                             const struct fb_image *image)
+{
+       qxl_fb_image->qdev = qdev;
+       if (info) {
+               qxl_fb_image->visual = info->fix.visual;
+               if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
+                   qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
+                       memcpy(&qxl_fb_image->pseudo_palette,
+                              info->pseudo_palette,
+                              sizeof(qxl_fb_image->pseudo_palette));
+       } else {
+                /* fallback */
+               if (image->depth == 1)
+                       qxl_fb_image->visual = FB_VISUAL_MONO10;
+               else
+                       qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
+       }
+       if (image) {
+               memcpy(&qxl_fb_image->fb_image, image,
+                      sizeof(qxl_fb_image->fb_image));
+       }
+}
+
+static void qxl_fb_dirty_flush(struct fb_info *info)
+{
+       struct qxl_fbdev *qfbdev = info->par;
+       struct qxl_device *qdev = qfbdev->qdev;
+       struct qxl_fb_image qxl_fb_image;
+       struct fb_image *image = &qxl_fb_image.fb_image;
+       u32 x1, x2, y1, y2;
+
+       /* TODO: hard coding 32 bpp */
+       int stride = qfbdev->qfb.base.pitches[0] * 4;
+
+       x1 = qfbdev->dirty.x1;
+       x2 = qfbdev->dirty.x2;
+       y1 = qfbdev->dirty.y1;
+       y2 = qfbdev->dirty.y2;
+       /*
+        * we are using a shadow draw buffer, at qdev->surface0_shadow
+        */
+       qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
+       image->dx = x1;
+       image->dy = y1;
+       image->width = x2 - x1;
+       image->height = y2 - y1;
+       image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
+                                        warnings */
+       image->bg_color = 0;
+       image->depth = 32;           /* TODO: take from somewhere? */
+       image->cmap.start = 0;
+       image->cmap.len = 0;
+       image->cmap.red = NULL;
+       image->cmap.green = NULL;
+       image->cmap.blue = NULL;
+       image->cmap.transp = NULL;
+       image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
+
+       qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
+       qxl_draw_opaque_fb(&qxl_fb_image, stride);
+       qfbdev->dirty.x1 = 0;
+       qfbdev->dirty.x2 = 0;
+       qfbdev->dirty.y1 = 0;
+       qfbdev->dirty.y2 = 0;
+}
+
+static void qxl_deferred_io(struct fb_info *info,
+                           struct list_head *pagelist)
+{
+       struct qxl_fbdev *qfbdev = info->par;
+       unsigned long start, end, min, max;
+       struct page *page;
+       int y1, y2;
+
+       min = ULONG_MAX;
+       max = 0;
+       list_for_each_entry(page, pagelist, lru) {
+               start = page->index << PAGE_SHIFT;
+               end = start + PAGE_SIZE - 1;
+               min = min(min, start);
+               max = max(max, end);
+       }
+
+       if (min < max) {
+               y1 = min / info->fix.line_length;
+               y2 = (max / info->fix.line_length) + 1;
+
+               /* TODO: add spin lock? */
+               /* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
+               qfbdev->dirty.x1 = 0;
+               qfbdev->dirty.y1 = y1;
+               qfbdev->dirty.x2 = info->var.xres;
+               qfbdev->dirty.y2 = y2;
+               /* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
+       }
+
+       qxl_fb_dirty_flush(info);
+}
+
+
+static struct fb_deferred_io qxl_defio = {
+       .delay          = QXL_DIRTY_DELAY,
+       .deferred_io    = qxl_deferred_io,
+};
+
+static void qxl_fb_fillrect(struct fb_info *info,
+                           const struct fb_fillrect *fb_rect)
+{
+       struct qxl_fbdev *qfbdev = info->par;
+       struct qxl_device *qdev = qfbdev->qdev;
+       struct qxl_rect rect;
+       uint32_t color;
+       int x = fb_rect->dx;
+       int y = fb_rect->dy;
+       int width = fb_rect->width;
+       int height = fb_rect->height;
+       uint16_t rop;
+       struct qxl_draw_fill qxl_draw_fill_rec;
+
+       if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+           info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+               color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
+       else
+               color = fb_rect->color;
+       rect.left = x;
+       rect.right = x + width;
+       rect.top = y;
+       rect.bottom = y + height;
+       switch (fb_rect->rop) {
+       case ROP_XOR:
+               rop = SPICE_ROPD_OP_XOR;
+               break;
+       case ROP_COPY:
+               rop = SPICE_ROPD_OP_PUT;
+               break;
+       default:
+               pr_err("qxl_fb_fillrect(): unknown rop, "
+                      "defaulting to SPICE_ROPD_OP_PUT\n");
+               rop = SPICE_ROPD_OP_PUT;
+       }
+       qxl_draw_fill_rec.qdev = qdev;
+       qxl_draw_fill_rec.rect = rect;
+       qxl_draw_fill_rec.color = color;
+       qxl_draw_fill_rec.rop = rop;
+       if (!drm_can_sleep()) {
+               qxl_io_log(qdev,
+                       "%s: TODO use RCU, mysterious locks with spin_lock\n",
+                       __func__);
+               return;
+       }
+       qxl_draw_fill(&qxl_draw_fill_rec);
+}
+
+static void qxl_fb_copyarea(struct fb_info *info,
+                           const struct fb_copyarea *region)
+{
+       struct qxl_fbdev *qfbdev = info->par;
+
+       qxl_draw_copyarea(qfbdev->qdev,
+                         region->width, region->height,
+                         region->sx, region->sy,
+                         region->dx, region->dy);
+}
+
+static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
+{
+       qxl_draw_opaque_fb(qxl_fb_image, 0);
+}
+
+static void qxl_fb_imageblit(struct fb_info *info,
+                            const struct fb_image *image)
+{
+       struct qxl_fbdev *qfbdev = info->par;
+       struct qxl_device *qdev = qfbdev->qdev;
+       struct qxl_fb_image qxl_fb_image;
+
+       if (!drm_can_sleep()) {
+               /* we cannot do any ttm_bo allocation since that will fail on
+                * ioremap_wc..__get_vm_area_node, so queue the work item
+                * instead This can happen from printk inside an interrupt
+                * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
+               qxl_io_log(qdev,
+                       "%s: TODO use RCU, mysterious locks with spin_lock\n",
+                          __func__);
+               return;
+       }
+
+       /* ensure proper order of rendering operations - TODO: must do this
+        * for everything. */
+       qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
+       qxl_fb_imageblit_safe(&qxl_fb_image);
+}
+
+int qxl_fb_init(struct qxl_device *qdev)
+{
+       return 0;
+}
+
+static struct fb_ops qxlfb_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+       .fb_fillrect = qxl_fb_fillrect,
+       .fb_copyarea = qxl_fb_copyarea,
+       .fb_imageblit = qxl_fb_imageblit,
+       .fb_pan_display = drm_fb_helper_pan_display,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcmap = drm_fb_helper_setcmap,
+       .fb_debug_enter = drm_fb_helper_debug_enter,
+       .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+       struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
+       int ret;
+
+       ret = qxl_bo_reserve(qbo, false);
+       if (likely(ret == 0)) {
+               qxl_bo_kunmap(qbo);
+               qxl_bo_unpin(qbo);
+               qxl_bo_unreserve(qbo);
+       }
+       drm_gem_object_unreference_unlocked(gobj);
+}
+
+int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
+                                 struct drm_file *file_priv,
+                                 uint32_t *handle)
+{
+       int r;
+       struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
+
+       BUG_ON(!gobj);
+       /* drm_gem_handle_create adds a reference - good */
+       r = drm_gem_handle_create(file_priv, gobj, handle);
+       if (r)
+               return r;
+       return 0;
+}
+
+static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
+                                     struct drm_mode_fb_cmd2 *mode_cmd,
+                                     struct drm_gem_object **gobj_p)
+{
+       struct qxl_device *qdev = qfbdev->qdev;
+       struct drm_gem_object *gobj = NULL;
+       struct qxl_bo *qbo = NULL;
+       int ret;
+       int aligned_size, size;
+       int height = mode_cmd->height;
+       int bpp;
+       int depth;
+
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
+
+       size = mode_cmd->pitches[0] * height;
+       aligned_size = ALIGN(size, PAGE_SIZE);
+       /* TODO: deallocate and reallocate surface0 for real. Hack to just
+        * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
+       ret = qxl_gem_object_create(qdev, aligned_size, 0,
+                                   QXL_GEM_DOMAIN_SURFACE,
+                                   false, /* is discardable */
+                                   false, /* is kernel (false means device) */
+                                   NULL,
+                                   &gobj);
+       if (ret) {
+               pr_err("failed to allocate framebuffer (%d)\n",
+                      aligned_size);
+               return -ENOMEM;
+       }
+       qbo = gem_to_qxl_bo(gobj);
+
+       qbo->surf.width = mode_cmd->width;
+       qbo->surf.height = mode_cmd->height;
+       qbo->surf.stride = mode_cmd->pitches[0];
+       qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
+       ret = qxl_bo_reserve(qbo, false);
+       if (unlikely(ret != 0))
+               goto out_unref;
+       ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
+       if (ret) {
+               qxl_bo_unreserve(qbo);
+               goto out_unref;
+       }
+       ret = qxl_bo_kmap(qbo, NULL);
+       qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
+       if (ret)
+               goto out_unref;
+
+       *gobj_p = gobj;
+       return 0;
+out_unref:
+       qxlfb_destroy_pinned_object(gobj);
+       *gobj_p = NULL;
+       return ret;
+}
+
+static int qxlfb_create(struct qxl_fbdev *qfbdev,
+                       struct drm_fb_helper_surface_size *sizes)
+{
+       struct qxl_device *qdev = qfbdev->qdev;
+       struct fb_info *info;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_mode_fb_cmd2 mode_cmd;
+       struct drm_gem_object *gobj = NULL;
+       struct qxl_bo *qbo = NULL;
+       struct device *device = &qdev->pdev->dev;
+       int ret;
+       int size;
+       int bpp = sizes->surface_bpp;
+       int depth = sizes->surface_depth;
+       void *shadow;
+
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
+
+       mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+       ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
+       qbo = gem_to_qxl_bo(gobj);
+       QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
+                mode_cmd.height, mode_cmd.pitches[0]);
+
+       shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
+       /* TODO: what's the usual response to memory allocation errors? */
+       BUG_ON(!shadow);
+       QXL_INFO(qdev,
+       "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
+                qxl_bo_gpu_offset(qbo),
+                qxl_bo_mmap_offset(qbo),
+                qbo->kptr,
+                shadow);
+       size = mode_cmd.pitches[0] * mode_cmd.height;
+
+       info = framebuffer_alloc(0, device);
+       if (info == NULL) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+
+       info->par = qfbdev;
+
+       qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
+
+       fb = &qfbdev->qfb.base;
+
+       /* setup helper with fb data */
+       qfbdev->helper.fb = fb;
+       qfbdev->helper.fbdev = info;
+       qfbdev->shadow = shadow;
+       strcpy(info->fix.id, "qxldrmfb");
+
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+
+       info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
+       info->fbops = &qxlfb_ops;
+
+       /*
+        * TODO: using gobj->size in various places in this function. Not sure
+        * what the difference between the different sizes is.
+        */
+       info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
+       info->fix.smem_len = gobj->size;
+       info->screen_base = qfbdev->shadow;
+       info->screen_size = gobj->size;
+
+       drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
+                              sizes->fb_height);
+
+       /* setup aperture base/size for vesafb takeover */
+       info->apertures = alloc_apertures(1);
+       if (!info->apertures) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+       info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
+       info->apertures->ranges[0].size = qdev->vram_size;
+
+       info->fix.mmio_start = 0;
+       info->fix.mmio_len = 0;
+
+       if (info->screen_base == NULL) {
+               ret = -ENOSPC;
+               goto out_unref;
+       }
+
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+
+       info->fbdefio = &qxl_defio;
+       fb_deferred_io_init(info);
+
+       qdev->fbdev_info = info;
+       qdev->fbdev_qfb = &qfbdev->qfb;
+       DRM_INFO("fb mappable at 0x%lX, size %lu\n",  info->fix.smem_start, (unsigned long)info->screen_size);
+       DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
+       return 0;
+
+out_unref:
+       if (qbo) {
+               ret = qxl_bo_reserve(qbo, false);
+               if (likely(ret == 0)) {
+                       qxl_bo_kunmap(qbo);
+                       qxl_bo_unpin(qbo);
+                       qxl_bo_unreserve(qbo);
+               }
+       }
+       if (fb && ret) {
+               drm_gem_object_unreference(gobj);
+               drm_framebuffer_cleanup(fb);
+               kfree(fb);
+       }
+       drm_gem_object_unreference(gobj);
+       return ret;
+}
+
+static int qxl_fb_find_or_create_single(
+               struct drm_fb_helper *helper,
+               struct drm_fb_helper_surface_size *sizes)
+{
+       struct qxl_fbdev *qfbdev = (struct qxl_fbdev *)helper;
+       int new_fb = 0;
+       int ret;
+
+       if (!helper->fb) {
+               ret = qxlfb_create(qfbdev, sizes);
+               if (ret)
+                       return ret;
+               new_fb = 1;
+       }
+       return new_fb;
+}
+
+static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
+{
+       struct fb_info *info;
+       struct qxl_framebuffer *qfb = &qfbdev->qfb;
+
+       if (qfbdev->helper.fbdev) {
+               info = qfbdev->helper.fbdev;
+
+               unregister_framebuffer(info);
+               framebuffer_release(info);
+       }
+       if (qfb->obj) {
+               qxlfb_destroy_pinned_object(qfb->obj);
+               qfb->obj = NULL;
+       }
+       drm_fb_helper_fini(&qfbdev->helper);
+       vfree(qfbdev->shadow);
+       drm_framebuffer_cleanup(&qfb->base);
+
+       return 0;
+}
+
+static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
+       /* TODO
+       .gamma_set = qxl_crtc_fb_gamma_set,
+       .gamma_get = qxl_crtc_fb_gamma_get,
+       */
+       .fb_probe = qxl_fb_find_or_create_single,
+};
+
+int qxl_fbdev_init(struct qxl_device *qdev)
+{
+       struct qxl_fbdev *qfbdev;
+       int bpp_sel = 32; /* TODO: parameter from somewhere? */
+       int ret;
+
+       qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
+       if (!qfbdev)
+               return -ENOMEM;
+
+       qfbdev->qdev = qdev;
+       qdev->mode_info.qfbdev = qfbdev;
+       qfbdev->helper.funcs = &qxl_fb_helper_funcs;
+
+       ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
+                                1 /* num_crtc - QXL supports just 1 */,
+                                QXLFB_CONN_LIMIT);
+       if (ret) {
+               kfree(qfbdev);
+               return ret;
+       }
+
+       drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
+       drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
+       return 0;
+}
+
+void qxl_fbdev_fini(struct qxl_device *qdev)
+{
+       if (!qdev->mode_info.qfbdev)
+               return;
+
+       qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
+       kfree(qdev->mode_info.qfbdev);
+       qdev->mode_info.qfbdev = NULL;
+}
+
+
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
new file mode 100644 (file)
index 0000000..63c6715
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+
+#include "qxl_drv.h"
+
+/* QXL fencing -
+
+   When we submit operations to the GPU we pass a release reference to the
+   GPU along with them; the release reference is then added to the release
+   ring when the GPU is finished with that particular operation and has
+   removed it from its tree.
+
+   So we can have multiple outstanding non-linear fences per object.
+
+   From a TTM POV we only care whether the object has any outstanding
+   releases on it.
+
+   We wait until all outstanding releases are processed.
+
+   The sync object is just a list of release ids that represent the fences
+   on that buffer.
+
+   We just add new releases onto the sync object attached to the object.
+
+   This currently uses a radix tree to store the list of release ids.
+
+   For some reason the qxl hw every so often fails to release; when that
+   happens, things go wrong.
+*/
+
+
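+/*
+ * Illustrative sketch (not part of this patch): a caller that submits a
+ * command against a bo would pair the two helpers below roughly like this,
+ * with rel_id being the release id handed to the hardware:
+ *
+ *     qxl_fence_add_release(&bo->fence, rel_id);
+ *     ... submit the command, later see rel_id come back on the release ring ...
+ *     qxl_fence_remove_release(&bo->fence, rel_id);
+ *
+ * TTM then only needs bo->fence.num_active_releases to tell whether the
+ * object is still busy.
+ */
+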
+int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
+{
+       struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+       spin_lock(&bo->tbo.bdev->fence_lock);
+       radix_tree_insert(&qfence->tree, rel_id, qfence);
+       qfence->num_active_releases++;
+       spin_unlock(&bo->tbo.bdev->fence_lock);
+       return 0;
+}
+
+int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
+{
+       void *ret;
+       int retval = 0;
+       struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+       spin_lock(&bo->tbo.bdev->fence_lock);
+
+       ret = radix_tree_delete(&qfence->tree, rel_id);
+       if (ret == qfence)
+               qfence->num_active_releases--;
+       else {
+               DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
+               retval = -ENOENT;
+       }
+       spin_unlock(&bo->tbo.bdev->fence_lock);
+       return retval;
+}
+
+
+int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
+{
+       qfence->qdev = qdev;
+       qfence->num_active_releases = 0;
+       INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
+       return 0;
+}
+
+void qxl_fence_fini(struct qxl_fence *qfence)
+{
+       kfree(qfence->release_ids);
+       qfence->num_active_releases = 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
new file mode 100644 (file)
index 0000000..a235693
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "drmP.h"
+#include "drm/drm.h"
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+int qxl_gem_object_init(struct drm_gem_object *obj)
+{
+       /* we do nothing here */
+       return 0;
+}
+
+void qxl_gem_object_free(struct drm_gem_object *gobj)
+{
+       struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
+
+       if (qobj)
+               qxl_bo_unref(&qobj);
+}
+
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+                         int alignment, int initial_domain,
+                         bool discardable, bool kernel,
+                         struct qxl_surface *surf,
+                         struct drm_gem_object **obj)
+{
+       struct qxl_bo *qbo;
+       int r;
+
+       *obj = NULL;
+       /* At least align on page size */
+       if (alignment < PAGE_SIZE)
+               alignment = PAGE_SIZE;
+       r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
+       if (r) {
+               if (r != -ERESTARTSYS)
+                       DRM_ERROR(
+                       "Failed to allocate GEM object (%d, %d, %u, %d)\n",
+                                 size, initial_domain, alignment, r);
+               return r;
+       }
+       *obj = &qbo->gem_base;
+
+       mutex_lock(&qdev->gem.mutex);
+       list_add_tail(&qbo->list, &qdev->gem.objects);
+       mutex_unlock(&qdev->gem.mutex);
+
+       return 0;
+}
+
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+                                     struct drm_file *file_priv,
+                                     u32 domain,
+                                     size_t size,
+                                     struct qxl_surface *surf,
+                                     struct qxl_bo **qobj,
+                                     uint32_t *handle)
+{
+       struct drm_gem_object *gobj;
+       int r;
+
+       BUG_ON(!qobj);
+       BUG_ON(!handle);
+
+       r = qxl_gem_object_create(qdev, size, 0,
+                                 domain,
+                                 false, false, surf,
+                                 &gobj);
+       if (r)
+               return -ENOMEM;
+       r = drm_gem_handle_create(file_priv, gobj, handle);
+       if (r)
+               return r;
+       /* drop reference from allocate - handle holds it now */
+       *qobj = gem_to_qxl_bo(gobj);
+       drm_gem_object_unreference_unlocked(gobj);
+       return 0;
+}
+
+int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+                         uint64_t *gpu_addr)
+{
+       struct qxl_bo *qobj = obj->driver_private;
+       int r;
+
+       r = qxl_bo_reserve(qobj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
+       qxl_bo_unreserve(qobj);
+       return r;
+}
+
+void qxl_gem_object_unpin(struct drm_gem_object *obj)
+{
+       struct qxl_bo *qobj = obj->driver_private;
+       int r;
+
+       r = qxl_bo_reserve(qobj, false);
+       if (likely(r == 0)) {
+               qxl_bo_unpin(qobj);
+               qxl_bo_unreserve(qobj);
+       }
+}
+
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+       return 0;
+}
+
+void qxl_gem_object_close(struct drm_gem_object *obj,
+                         struct drm_file *file_priv)
+{
+}
+
+int qxl_gem_init(struct qxl_device *qdev)
+{
+       INIT_LIST_HEAD(&qdev->gem.objects);
+       return 0;
+}
+
+void qxl_gem_fini(struct qxl_device *qdev)
+{
+       qxl_bo_force_delete(qdev);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
new file mode 100644 (file)
index 0000000..cf85620
--- /dev/null
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int
+qxl_image_create_helper(struct qxl_device *qdev,
+                       struct qxl_release *release,
+                       struct qxl_bo **image_bo,
+                       const uint8_t *data,
+                       int width, int height,
+                       int depth, unsigned int hash,
+                       int stride)
+{
+       struct qxl_image *image;
+       struct qxl_data_chunk *chunk;
+       int i;
+       int chunk_stride;
+       int linesize = width * depth / 8;
+       struct qxl_bo *chunk_bo;
+       int ret;
+       void *ptr;
+       /* Chunk */
+       /* FIXME: Check integer overflow */
+       /* TODO: variable number of chunks */
+       chunk_stride = stride; /* TODO: should use linesize, but it renders
+                                 wrong (check the bitmaps are sent correctly
+                                 first) */
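+       /*
+        * Rough size example (hypothetical values): a 64x64 image at 32bpp
+        * gives linesize = 64 * 32 / 8 = 256 bytes, so the chunk bo below is
+        * sizeof(*chunk) + 64 * chunk_stride bytes and gets filled page by
+        * page via qxl_bo_kmap_atomic_page().
+        */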
+       ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
+                                   &chunk_bo);
+
+       ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
+       chunk = ptr;
+       chunk->data_size = height * chunk_stride;
+       chunk->prev_chunk = 0;
+       chunk->next_chunk = 0;
+       qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+
+       {
+               void *k_data, *i_data;
+               int remain;
+               int page;
+               int size;
+               if (stride == linesize && chunk_stride == stride) {
+                       remain = linesize * height;
+                       page = 0;
+                       i_data = (void *)data;
+
+                       while (remain > 0) {
+                               ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
+
+                               if (page == 0) {
+                                       chunk = ptr;
+                                       k_data = chunk->data;
+                                       size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
+                               } else {
+                                       k_data = ptr;
+                                       size = PAGE_SIZE;
+                               }
+                               size = min(size, remain);
+
+                               memcpy(k_data, i_data, size);
+
+                               qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+                               i_data += size;
+                               remain -= size;
+                               page++;
+                       }
+               } else {
+                       unsigned page_base, page_offset, out_offset;
+                       for (i = 0 ; i < height ; ++i) {
+                               i_data = (void *)data + i * stride;
+                               remain = linesize;
+                               out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;
+
+                               while (remain > 0) {
+                                       page_base = out_offset & PAGE_MASK;
+                                       page_offset = offset_in_page(out_offset);
+
+                                       size = min((int)(PAGE_SIZE - page_offset), remain);
+
+                                       ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
+                                       k_data = ptr + page_offset;
+                                       memcpy(k_data, i_data, size);
+                                       qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+                                       remain -= size;
+                                       i_data += size;
+                                       out_offset += size;
+                               }
+                       }
+               }
+       }
+
+
+       qxl_bo_kunmap(chunk_bo);
+
+       /* Image */
+       ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
+
+       ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
+       image = ptr;
+
+       image->descriptor.id = 0;
+       image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;
+
+       image->descriptor.flags = 0;
+       image->descriptor.width = width;
+       image->descriptor.height = height;
+
+       switch (depth) {
+       case 1:
+               /* TODO: BE? check by arch? */
+               image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
+               break;
+       case 24:
+               image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
+               break;
+       case 32:
+               image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
+               break;
+       default:
+               DRM_ERROR("unsupported image bit depth\n");
+               return -EINVAL; /* TODO: cleanup */
+       }
+       image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
+       image->u.bitmap.x = width;
+       image->u.bitmap.y = height;
+       image->u.bitmap.stride = chunk_stride;
+       image->u.bitmap.palette = 0;
+       image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
+       qxl_release_add_res(qdev, release, chunk_bo);
+       qxl_bo_unreserve(chunk_bo);
+       qxl_bo_unref(&chunk_bo);
+
+       qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
+
+       return 0;
+}
+
+int qxl_image_create(struct qxl_device *qdev,
+                    struct qxl_release *release,
+                    struct qxl_bo **image_bo,
+                    const uint8_t *data,
+                    int x, int y, int width, int height,
+                    int depth, int stride)
+{
+       data += y * stride + x * (depth / 8);
+       return qxl_image_create_helper(qdev, release, image_bo, data,
+                                      width, height, depth, 0, stride);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
new file mode 100644 (file)
index 0000000..04b64f9
--- /dev/null
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * TODO: we allocate a new gem (in qxl_bo) for each request.
+ * This is wasteful since bos are page aligned.
+ */
+static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_alloc *qxl_alloc = data;
+       int ret;
+       struct qxl_bo *qobj;
+       uint32_t handle;
+       u32 domain = QXL_GEM_DOMAIN_VRAM;
+
+       if (qxl_alloc->size == 0) {
+               DRM_ERROR("invalid size %d\n", qxl_alloc->size);
+               return -EINVAL;
+       }
+       ret = qxl_gem_object_create_with_handle(qdev, file_priv,
+                                               domain,
+                                               qxl_alloc->size,
+                                               NULL,
+                                               &qobj, &handle);
+       if (ret) {
+               DRM_ERROR("%s: failed to create gem ret=%d\n",
+                         __func__, ret);
+               return -ENOMEM;
+       }
+       qxl_alloc->handle = handle;
+       return 0;
+}
+
+static int qxl_map_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_map *qxl_map = data;
+
+       return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
+                                 &qxl_map->offset);
+}
+
+/*
+ * dst must be validated, i.e. the whole bo must be on vram/surfacesram
+ * (right now all bos are on vram).
+ * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
+ */
+static void
+apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
+           struct qxl_bo *src, uint64_t src_off)
+{
+       void *reloc_page;
+
+       reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
+       *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+                                                                    src, src_off);
+       qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+}
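+
+/*
+ * Illustrative example (not part of this patch): to make a command point at
+ * data held in another bo, userspace supplies a reloc naming the destination
+ * bo/offset of the pointer field; apply_reloc() then writes
+ * qxl_bo_physical_address(qdev, src, src_off) into that slot, exactly the
+ * assignment the comment above spells out.
+ */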
+
+static void
+apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
+                struct qxl_bo *src)
+{
+       uint32_t id = 0;
+       void *reloc_page;
+
+       if (src && !src->is_primary)
+               id = src->surface_id;
+
+       reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
+       *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
+       qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+}
+
+/* return holding the reference to this object */
+static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
+                                        struct drm_file *file_priv, uint64_t handle,
+                                        struct qxl_reloc_list *reloc_list)
+{
+       struct drm_gem_object *gobj;
+       struct qxl_bo *qobj;
+       int ret;
+
+       gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
+       if (!gobj) {
+               DRM_ERROR("bad bo handle %lld\n", handle);
+               return NULL;
+       }
+       qobj = gem_to_qxl_bo(gobj);
+
+       ret = qxl_bo_list_add(reloc_list, qobj);
+       if (ret)
+               return NULL;
+
+       return qobj;
+}
+
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full QXLDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * QXLReleaseInfo struct (first XXX bytes)
+ */
+static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_execbuffer *execbuffer = data;
+       struct drm_qxl_command user_cmd;
+       int cmd_num;
+       struct qxl_bo *reloc_src_bo;
+       struct qxl_bo *reloc_dst_bo;
+       struct drm_qxl_reloc reloc;
+       void *fb_cmd;
+       int i, ret;
+       struct qxl_reloc_list reloc_list;
+       int unwritten;
+       uint32_t reloc_dst_offset;
+       INIT_LIST_HEAD(&reloc_list.bos);
+
+       for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+               struct qxl_release *release;
+               struct qxl_bo *cmd_bo;
+               int release_type;
+               struct drm_qxl_command *commands =
+                       (struct drm_qxl_command *)execbuffer->commands;
+
+               if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+                                      sizeof(user_cmd)))
+                       return -EFAULT;
+               switch (user_cmd.type) {
+               case QXL_CMD_DRAW:
+                       release_type = QXL_RELEASE_DRAWABLE;
+                       break;
+               case QXL_CMD_SURFACE:
+               case QXL_CMD_CURSOR:
+               default:
+                       DRM_DEBUG("Only draw commands in execbuffers\n");
+                       return -EINVAL;
+                       break;
+               }
+
+               if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+                       return -EINVAL;
+
+               ret = qxl_alloc_release_reserved(qdev,
+                                                sizeof(union qxl_release_info) +
+                                                user_cmd.command_size,
+                                                release_type,
+                                                &release,
+                                                &cmd_bo);
+               if (ret)
+                       return ret;
+
+               /* TODO copy slow path code from i915 */
+               fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+               unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
+               qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+               if (unwritten) {
+                       DRM_ERROR("got unwritten %d\n", unwritten);
+                       qxl_release_unreserve(qdev, release);
+                       qxl_release_free(qdev, release);
+                       return -EFAULT;
+               }
+
+               for (i = 0 ; i < user_cmd.relocs_num; ++i) {
+                       if (DRM_COPY_FROM_USER(&reloc,
+                                              &((struct drm_qxl_reloc *)user_cmd.relocs)[i],
+                                              sizeof(reloc))) {
+                               qxl_bo_list_unreserve(&reloc_list, true);
+                               qxl_release_unreserve(qdev, release);
+                               qxl_release_free(qdev, release);
+                               return -EFAULT;
+                       }
+
+                       /* add the bos to the list of bos to validate -
+                          need to validate first then process relocs? */
+                       if (reloc.dst_handle) {
+                               reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
+                                                                 reloc.dst_handle, &reloc_list);
+                               if (!reloc_dst_bo) {
+                                       qxl_bo_list_unreserve(&reloc_list, true);
+                                       qxl_release_unreserve(qdev, release);
+                                       qxl_release_free(qdev, release);
+                                       return -EINVAL;
+                               }
+                               reloc_dst_offset = 0;
+                       } else {
+                               reloc_dst_bo = cmd_bo;
+                               reloc_dst_offset = release->release_offset;
+                       }
+
+                       /* reserve and validate the reloc dst bo */
+                       if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
+                               reloc_src_bo =
+                                       qxlhw_handle_to_bo(qdev, file_priv,
+                                                          reloc.src_handle, &reloc_list);
+                               if (!reloc_src_bo) {
+                                       if (reloc_dst_bo != cmd_bo)
+                                               drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+                                       qxl_bo_list_unreserve(&reloc_list, true);
+                                       qxl_release_unreserve(qdev, release);
+                                       qxl_release_free(qdev, release);
+                                       return -EINVAL;
+                               }
+                       } else
+                               reloc_src_bo = NULL;
+                       if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
+                               apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
+                                           reloc_src_bo, reloc.src_offset);
+                       } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
+                               apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
+                       } else {
+                               DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
+                               return -EINVAL;
+                       }
+
+                       if (reloc_src_bo && reloc_src_bo != cmd_bo) {
+                               qxl_release_add_res(qdev, release, reloc_src_bo);
+                               drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
+                       }
+
+                       if (reloc_dst_bo != cmd_bo)
+                               drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+               }
+               qxl_fence_releaseable(qdev, release);
+
+               ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
+               if (ret == -ERESTARTSYS) {
+                       qxl_release_unreserve(qdev, release);
+                       qxl_release_free(qdev, release);
+                       qxl_bo_list_unreserve(&reloc_list, true);
+                       return ret;
+               }
+               qxl_release_unreserve(qdev, release);
+       }
+       qxl_bo_list_unreserve(&reloc_list, 0);
+       return 0;
+}
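+
+/*
+ * Rough userspace-side sketch (illustrative only, error handling omitted)
+ * of how the fields consumed above would be filled in; the struct and ioctl
+ * names are the ones from the qxl uapi header, and per the note above the
+ * drawable data must not include the leading QXLReleaseInfo:
+ *
+ *     struct drm_qxl_command cmd = {
+ *             .command      = (uintptr_t)drawable_data,
+ *             .command_size = drawable_size,
+ *             .type         = QXL_CMD_DRAW,
+ *             .relocs       = (uintptr_t)relocs,
+ *             .relocs_num   = num_relocs,
+ *     };
+ *     struct drm_qxl_execbuffer eb = {
+ *             .commands_num = 1,
+ *             .commands     = (uintptr_t)&cmd,
+ *     };
+ *     drmIoctl(fd, DRM_IOCTL_QXL_EXECBUFFER, &eb);
+ */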
+
+static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
+                                struct drm_file *file)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_update_area *update_area = data;
+       struct qxl_rect area = {.left = update_area->left,
+                               .top = update_area->top,
+                               .right = update_area->right,
+                               .bottom = update_area->bottom};
+       int ret;
+       struct drm_gem_object *gobj = NULL;
+       struct qxl_bo *qobj = NULL;
+
+       if (update_area->left >= update_area->right ||
+           update_area->top >= update_area->bottom)
+               return -EINVAL;
+
+       gobj = drm_gem_object_lookup(dev, file, update_area->handle);
+       if (gobj == NULL)
+               return -ENOENT;
+
+       qobj = gem_to_qxl_bo(gobj);
+
+       ret = qxl_bo_reserve(qobj, false);
+       if (ret)
+               goto out;
+
+       if (!qobj->pin_count) {
+               ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+                                     true, false);
+               if (unlikely(ret))
+                       goto out;
+       }
+
+       ret = qxl_bo_check_id(qdev, qobj);
+       if (ret)
+               goto out2;
+       if (!qobj->surface_id)
+               DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
+       ret = qxl_io_update_area(qdev, qobj, &area);
+
+out2:
+       qxl_bo_unreserve(qobj);
+
+out:
+       drm_gem_object_unreference_unlocked(gobj);
+       return ret;
+}
+
+static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_getparam *param = data;
+
+       switch (param->param) {
+       case QXL_PARAM_NUM_SURFACES:
+               param->value = qdev->rom->n_surfaces;
+               break;
+       case QXL_PARAM_MAX_RELOCS:
+               param->value = QXL_MAX_RES;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_clientcap *param = data;
+       int byte, idx;
+
+       byte = param->index / 8;
+       idx = param->index % 8;
+
+       if (qdev->pdev->revision < 4)
+               return -ENOSYS;
+
+       if (byte >= 58)
+               return -ENOSYS;
+
+       if (qdev->rom->client_capabilities[byte] & (1 << idx))
+               return 0;
+       return -ENOSYS;
+}
+
+static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file)
+{
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_alloc_surf *param = data;
+       struct qxl_bo *qobj;
+       int handle;
+       int ret;
+       int size, actual_stride;
+       struct qxl_surface surf;
+
+       /* work out the size and allocate a bo with a handle */
+       actual_stride = param->stride < 0 ? -param->stride : param->stride;
+       size = actual_stride * param->height + actual_stride;
+
+       surf.format = param->format;
+       surf.width = param->width;
+       surf.height = param->height;
+       surf.stride = param->stride;
+       surf.data = 0;
+
+       ret = qxl_gem_object_create_with_handle(qdev, file,
+                                               QXL_GEM_DOMAIN_SURFACE,
+                                               size,
+                                               &surf,
+                                               &qobj, &handle);
+       if (ret) {
+               DRM_ERROR("%s: failed to create gem ret=%d\n",
+                         __func__, ret);
+               return -ENOMEM;
+       } else
+               param->handle = handle;
+       return ret;
+}
+
+struct drm_ioctl_desc qxl_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+       DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+       DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
+                                                       DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
+                                                       DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
+                                                       DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
+                                                       DRM_AUTH|DRM_UNLOCKED),
+
+       DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
+                         DRM_AUTH|DRM_UNLOCKED),
+};
+
+int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
new file mode 100644 (file)
index 0000000..21393dc
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+
+irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+{
+       struct drm_device *dev = (struct drm_device *) arg;
+       struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+       uint32_t pending;
+
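+       /* atomically fetch and clear the interrupt bits the device posted in
+        * the shared ram header */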
+       pending = xchg(&qdev->ram_header->int_pending, 0);
+
+       atomic_inc(&qdev->irq_received);
+
+       if (pending & QXL_INTERRUPT_DISPLAY) {
+               atomic_inc(&qdev->irq_received_display);
+               wake_up_all(&qdev->display_event);
+               qxl_queue_garbage_collect(qdev, false);
+       }
+       if (pending & QXL_INTERRUPT_CURSOR) {
+               atomic_inc(&qdev->irq_received_cursor);
+               wake_up_all(&qdev->cursor_event);
+       }
+       if (pending & QXL_INTERRUPT_IO_CMD) {
+               atomic_inc(&qdev->irq_received_io_cmd);
+               wake_up_all(&qdev->io_cmd_event);
+       }
+       if (pending & QXL_INTERRUPT_ERROR) {
+               /* TODO: log it, reset device (only way to exit this condition)
+                * (do it a certain number of times, afterwards admit defeat,
+                * to avoid endless loops).
+                */
+               qdev->irq_received_error++;
+               qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
+       }
+       if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
+               qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
+               schedule_work(&qdev->client_monitors_config_work);
+       }
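+       /* re-arm: restore the interrupt mask and poke the device through the
+        * UPDATE_IRQ io port */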
+       qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+       outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
+       return IRQ_HANDLED;
+}
+
+static void qxl_client_monitors_config_work_func(struct work_struct *work)
+{
+       struct qxl_device *qdev = container_of(work, struct qxl_device,
+                                              client_monitors_config_work);
+
+       qxl_display_read_client_monitors_config(qdev);
+}
+
+int qxl_irq_init(struct qxl_device *qdev)
+{
+       int ret;
+
+       init_waitqueue_head(&qdev->display_event);
+       init_waitqueue_head(&qdev->cursor_event);
+       init_waitqueue_head(&qdev->io_cmd_event);
+       INIT_WORK(&qdev->client_monitors_config_work,
+                 qxl_client_monitors_config_work_func);
+       atomic_set(&qdev->irq_received, 0);
+       atomic_set(&qdev->irq_received_display, 0);
+       atomic_set(&qdev->irq_received_cursor, 0);
+       atomic_set(&qdev->irq_received_io_cmd, 0);
+       qdev->irq_received_error = 0;
+       ret = drm_irq_install(qdev->ddev);
+       qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed installing irq: %d\n", ret);
+               return 1;
+       }
+       return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
new file mode 100644 (file)
index 0000000..85127ed
--- /dev/null
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/io-mapping.h>
+
+int qxl_log_level;
+
+static void qxl_dump_mode(struct qxl_device *qdev, void *p)
+{
+       struct qxl_mode *m = p;
+       DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
+                     m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
+                     m->y_mili, m->orientation);
+}
+
+static bool qxl_check_device(struct qxl_device *qdev)
+{
+       struct qxl_rom *rom = qdev->rom;
+       int mode_offset;
+       int i;
+
+       if (rom->magic != 0x4f525851) {
+               DRM_ERROR("bad rom signature %x\n", rom->magic);
+               return false;
+       }
+
+       DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
+       DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
+                rom->log_level);
+       DRM_INFO("Currently using mode #%d, list at 0x%x\n",
+                rom->mode, rom->modes_offset);
+       DRM_INFO("%d io pages at offset 0x%x\n",
+                rom->num_io_pages, rom->pages_offset);
+       DRM_INFO("%d byte draw area at offset 0x%x\n",
+                rom->surface0_area_size, rom->draw_area_offset);
+
+       qdev->vram_size = rom->surface0_area_size;
+       DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
+
+       mode_offset = rom->modes_offset / 4;
+       qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
+       DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
+                qdev->mode_info.num_modes);
+       qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
+       for (i = 0; i < qdev->mode_info.num_modes; i++)
+               qxl_dump_mode(qdev, qdev->mode_info.modes + i);
+       return true;
+}
+
+static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
+       unsigned long start_phys_addr, unsigned long end_phys_addr)
+{
+       uint64_t high_bits;
+       struct qxl_memslot *slot;
+       uint8_t slot_index;
+       struct qxl_ram_header *ram_header = qdev->ram_header;
+
+       slot_index = qdev->rom->slots_start + slot_index_offset;
+       slot = &qdev->mem_slots[slot_index];
+       slot->start_phys_addr = start_phys_addr;
+       slot->end_phys_addr = end_phys_addr;
+       ram_header->mem_slot.mem_start = slot->start_phys_addr;
+       ram_header->mem_slot.mem_end = slot->end_phys_addr;
+       qxl_io_memslot_add(qdev, slot_index);
+       slot->generation = qdev->rom->slot_generation;
+       high_bits = slot_index << qdev->slot_gen_bits;
+       high_bits |= slot->generation;
+       high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
+       slot->high_bits = high_bits;
+       return slot_index;
+}
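+
+/*
+ * Worked example (field widths hypothetical): with slot_gen_bits = 8 and
+ * slot_id_bits = 8, setup_slot() computes
+ * high_bits = ((slot_index << 8) | generation) << (64 - 16),
+ * i.e. the slot id and generation are packed into the top 16 bits of
+ * slot->high_bits.
+ */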
+
+static void qxl_gc_work(struct work_struct *work)
+{
+       struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
+       qxl_garbage_collect(qdev);
+}
+
+int qxl_device_init(struct qxl_device *qdev,
+                   struct drm_device *ddev,
+                   struct pci_dev *pdev,
+                   unsigned long flags)
+{
+       int r;
+
+       qdev->dev = &pdev->dev;
+       qdev->ddev = ddev;
+       qdev->pdev = pdev;
+       qdev->flags = flags;
+
+       mutex_init(&qdev->gem.mutex);
+       mutex_init(&qdev->update_area_mutex);
+       mutex_init(&qdev->release_mutex);
+       mutex_init(&qdev->surf_evict_mutex);
+       INIT_LIST_HEAD(&qdev->gem.objects);
+
+       qdev->rom_base = pci_resource_start(pdev, 2);
+       qdev->rom_size = pci_resource_len(pdev, 2);
+       qdev->vram_base = pci_resource_start(pdev, 0);
+       qdev->surfaceram_base = pci_resource_start(pdev, 1);
+       qdev->surfaceram_size = pci_resource_len(pdev, 1);
+       qdev->io_base = pci_resource_start(pdev, 3);
+
+       qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
+       qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
+       DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
+                (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
+                (int)pci_resource_len(pdev, 0) / 1024 / 1024,
+                (int)pci_resource_len(pdev, 0) / 1024,
+                (void *)qdev->surfaceram_base,
+                (void *)pci_resource_end(pdev, 1),
+                (int)qdev->surfaceram_size / 1024 / 1024,
+                (int)qdev->surfaceram_size / 1024);
+
+       qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
+       if (!qdev->rom) {
+               pr_err("Unable to ioremap ROM\n");
+               return -ENOMEM;
+       }
+
+       qxl_check_device(qdev);
+
+       r = qxl_bo_init(qdev);
+       if (r) {
+               DRM_ERROR("bo init failed %d\n", r);
+               return r;
+       }
+
+       qdev->ram_header = ioremap(qdev->vram_base +
+                                  qdev->rom->ram_header_offset,
+                                  sizeof(*qdev->ram_header));
+
+       qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
+                                            sizeof(struct qxl_command),
+                                            QXL_COMMAND_RING_SIZE,
+                                            qdev->io_base + QXL_IO_NOTIFY_CMD,
+                                            false,
+                                            &qdev->display_event);
+
+       qdev->cursor_ring = qxl_ring_create(
+                               &(qdev->ram_header->cursor_ring_hdr),
+                               sizeof(struct qxl_command),
+                               QXL_CURSOR_RING_SIZE,
+                               qdev->io_base + QXL_IO_NOTIFY_CMD,
+                               false,
+                               &qdev->cursor_event);
+
+       qdev->release_ring = qxl_ring_create(
+                               &(qdev->ram_header->release_ring_hdr),
+                               sizeof(uint64_t),
+                               QXL_RELEASE_RING_SIZE, 0, true,
+                               NULL);
+
+       /* TODO - slot initialization should happen on reset. where is our
+        * reset handler? */
+       qdev->n_mem_slots = qdev->rom->slots_end;
+       qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
+       qdev->slot_id_bits = qdev->rom->slot_id_bits;
+       qdev->va_slot_mask =
+               (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
+
+       qdev->mem_slots =
+               kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
+                       GFP_KERNEL);
+
+       idr_init(&qdev->release_idr);
+       spin_lock_init(&qdev->release_idr_lock);
+
+       idr_init(&qdev->surf_id_idr);
+       spin_lock_init(&qdev->surf_id_idr_lock);
+
+       mutex_init(&qdev->async_io_mutex);
+
+       /* reset the device into a known state - no memslots, no primary
+        * created, no surfaces. */
+       qxl_io_reset(qdev);
+
+       /* must initialize irq before first async io - slot creation */
+       r = qxl_irq_init(qdev);
+       if (r)
+               return r;
+
+       /*
+        * Note that virtual is surface0. We rely on the single ioremap done
+        * before.
+        */
+       qdev->main_mem_slot = setup_slot(qdev, 0,
+               (unsigned long)qdev->vram_base,
+               (unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
+       qdev->surfaces_mem_slot = setup_slot(qdev, 1,
+               (unsigned long)qdev->surfaceram_base,
+               (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
+       DRM_INFO("main mem slot %d [%lx,%x)\n",
+               qdev->main_mem_slot,
+               (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+
+
+       qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
+       INIT_WORK(&qdev->gc_work, qxl_gc_work);
+
+       r = qxl_fb_init(qdev);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+static void qxl_device_fini(struct qxl_device *qdev)
+{
+       if (qdev->current_release_bo[0])
+               qxl_bo_unref(&qdev->current_release_bo[0]);
+       if (qdev->current_release_bo[1])
+               qxl_bo_unref(&qdev->current_release_bo[1]);
+       flush_workqueue(qdev->gc_queue);
+       destroy_workqueue(qdev->gc_queue);
+       qdev->gc_queue = NULL;
+
+       qxl_ring_free(qdev->command_ring);
+       qxl_ring_free(qdev->cursor_ring);
+       qxl_ring_free(qdev->release_ring);
+       qxl_bo_fini(qdev);
+       io_mapping_free(qdev->surface_mapping);
+       io_mapping_free(qdev->vram_mapping);
+       iounmap(qdev->ram_header);
+       iounmap(qdev->rom);
+       qdev->rom = NULL;
+       qdev->mode_info.modes = NULL;
+       qdev->mode_info.num_modes = 0;
+       qxl_debugfs_remove_files(qdev);
+}
+
+int qxl_driver_unload(struct drm_device *dev)
+{
+       struct qxl_device *qdev = dev->dev_private;
+
+       if (qdev == NULL)
+               return 0;
+       qxl_modeset_fini(qdev);
+       qxl_device_fini(qdev);
+
+       kfree(qdev);
+       dev->dev_private = NULL;
+       return 0;
+}
+
+int qxl_driver_load(struct drm_device *dev, unsigned long flags)
+{
+       struct qxl_device *qdev;
+       int r;
+
+       /* require kms */
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -ENODEV;
+
+       qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
+       if (qdev == NULL)
+               return -ENOMEM;
+
+       dev->dev_private = qdev;
+
+       r = qxl_device_init(qdev, dev, dev->pdev, flags);
+       if (r)
+               goto out;
+
+       r = qxl_modeset_init(qdev);
+       if (r) {
+               qxl_driver_unload(dev);
+               goto out;
+       }
+
+       return 0;
+out:
+       kfree(qdev);
+       return r;
+}
+
+
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
new file mode 100644 (file)
index 0000000..d9b12e7
--- /dev/null
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/io-mapping.h>
+static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+       struct qxl_bo *bo;
+       struct qxl_device *qdev;
+
+       bo = container_of(tbo, struct qxl_bo, tbo);
+       qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+
+       qxl_surface_evict(qdev, bo, false);
+       qxl_fence_fini(&bo->fence);
+       mutex_lock(&qdev->gem.mutex);
+       list_del_init(&bo->list);
+       mutex_unlock(&qdev->gem.mutex);
+       drm_gem_object_release(&bo->gem_base);
+       kfree(bo);
+}
+
+bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
+{
+       if (bo->destroy == &qxl_ttm_bo_destroy)
+               return true;
+       return false;
+}
+
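+/*
+ * Translate a QXL GEM domain into a TTM placement list: VRAM maps to
+ * TTM_PL_VRAM, the surface domain to TTM_PL_PRIV0, and anything else
+ * (including the CPU domain) falls back to cached system memory.
+ */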
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
+{
+       u32 c = 0;
+
+       qbo->placement.fpfn = 0;
+       qbo->placement.lpfn = 0;
+       qbo->placement.placement = qbo->placements;
+       qbo->placement.busy_placement = qbo->placements;
+       if (domain == QXL_GEM_DOMAIN_VRAM)
+               qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
+       if (domain == QXL_GEM_DOMAIN_SURFACE)
+               qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
+       if (domain == QXL_GEM_DOMAIN_CPU)
+               qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       if (!c)
+               qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       qbo->placement.num_placement = c;
+       qbo->placement.num_busy_placement = c;
+}
+
+
+int qxl_bo_create(struct qxl_device *qdev,
+                 unsigned long size, bool kernel, u32 domain,
+                 struct qxl_surface *surf,
+                 struct qxl_bo **bo_ptr)
+{
+       struct qxl_bo *bo;
+       enum ttm_bo_type type;
+       int r;
+
+       if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
+               qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+       if (kernel)
+               type = ttm_bo_type_kernel;
+       else
+               type = ttm_bo_type_device;
+       *bo_ptr = NULL;
+       bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
+       if (bo == NULL)
+               return -ENOMEM;
+       size = roundup(size, PAGE_SIZE);
+       r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
+       if (unlikely(r)) {
+               kfree(bo);
+               return r;
+       }
+       bo->gem_base.driver_private = NULL;
+       bo->type = domain;
+       bo->pin_count = 0;
+       bo->surface_id = 0;
+       qxl_fence_init(qdev, &bo->fence);
+       INIT_LIST_HEAD(&bo->list);
+       atomic_set(&bo->reserve_count, 0);
+       if (surf)
+               bo->surf = *surf;
+
+       qxl_ttm_placement_from_domain(bo, domain);
+
+       r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
+                       &bo->placement, 0, !kernel, NULL, size,
+                       NULL, &qxl_ttm_bo_destroy);
+       if (unlikely(r != 0)) {
+               if (r != -ERESTARTSYS)
+                       dev_err(qdev->dev,
+                               "object_init failed for (%lu, 0x%08X)\n",
+                               size, domain);
+               return r;
+       }
+       *bo_ptr = bo;
+       return 0;
+}
+
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
+{
+       bool is_iomem;
+       int r;
+
+       if (bo->kptr) {
+               if (ptr)
+                       *ptr = bo->kptr;
+               return 0;
+       }
+       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+       if (r)
+               return r;
+       bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+       if (ptr)
+               *ptr = bo->kptr;
+       return 0;
+}
+
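+/*
+ * Map a single page of a BO: use an atomic io-mapping when the BO lives
+ * in VRAM or surface memory, otherwise fall back to a full (non-atomic)
+ * kernel mapping of the whole object.
+ */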
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
+                             struct qxl_bo *bo, int page_offset)
+{
+       struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+       void *rptr;
+       int ret;
+       struct io_mapping *map;
+
+       if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+               map = qdev->vram_mapping;
+       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+               map = qdev->surface_mapping;
+       else
+               goto fallback;
+
+       (void) ttm_mem_io_lock(man, false);
+       ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
+       ttm_mem_io_unlock(man);
+
+       return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
+fallback:
+       if (bo->kptr) {
+               rptr = bo->kptr + (page_offset * PAGE_SIZE);
+               return rptr;
+       }
+
+       ret = qxl_bo_kmap(bo, &rptr);
+       if (ret)
+               return NULL;
+
+       rptr += page_offset * PAGE_SIZE;
+       return rptr;
+}
+
+void qxl_bo_kunmap(struct qxl_bo *bo)
+{
+       if (bo->kptr == NULL)
+               return;
+       bo->kptr = NULL;
+       ttm_bo_kunmap(&bo->kmap);
+}
+
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
+                              struct qxl_bo *bo, void *pmap)
+{
+       struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+       struct io_mapping *map;
+
+       if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+               map = qdev->vram_mapping;
+       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+               map = qdev->surface_mapping;
+       else
+               goto fallback;
+
+       io_mapping_unmap_atomic(pmap);
+
+       (void) ttm_mem_io_lock(man, false);
+       ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
+       ttm_mem_io_unlock(man);
+       return;
+fallback:
+       qxl_bo_kunmap(bo);
+}
+
+void qxl_bo_unref(struct qxl_bo **bo)
+{
+       struct ttm_buffer_object *tbo;
+
+       if ((*bo) == NULL)
+               return;
+       tbo = &((*bo)->tbo);
+       ttm_bo_unref(&tbo);
+       if (tbo == NULL)
+               *bo = NULL;
+}
+
+struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
+{
+       ttm_bo_reference(&bo->tbo);
+       return bo;
+}
+
+int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+{
+       struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+       int r, i;
+
+       if (bo->pin_count) {
+               bo->pin_count++;
+               if (gpu_addr)
+                       *gpu_addr = qxl_bo_gpu_offset(bo);
+               return 0;
+       }
+       qxl_ttm_placement_from_domain(bo, domain);
+       for (i = 0; i < bo->placement.num_placement; i++)
+               bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       if (likely(r == 0)) {
+               bo->pin_count = 1;
+               if (gpu_addr != NULL)
+                       *gpu_addr = qxl_bo_gpu_offset(bo);
+       }
+       if (unlikely(r != 0))
+               dev_err(qdev->dev, "%p pin failed\n", bo);
+       return r;
+}
+
+int qxl_bo_unpin(struct qxl_bo *bo)
+{
+       struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+       int r, i;
+
+       if (!bo->pin_count) {
+               dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
+               return 0;
+       }
+       bo->pin_count--;
+       if (bo->pin_count)
+               return 0;
+       for (i = 0; i < bo->placement.num_placement; i++)
+               bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       if (unlikely(r != 0))
+               dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
+       return r;
+}
+
+void qxl_bo_force_delete(struct qxl_device *qdev)
+{
+       struct qxl_bo *bo, *n;
+
+       if (list_empty(&qdev->gem.objects))
+               return;
+       dev_err(qdev->dev, "Userspace still has active objects!\n");
+       list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
+               mutex_lock(&qdev->ddev->struct_mutex);
+               dev_err(qdev->dev, "%p %p %lu %lu force free\n",
+                       &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+                       *((unsigned long *)&bo->gem_base.refcount));
+               mutex_lock(&qdev->gem.mutex);
+               list_del_init(&bo->list);
+               mutex_unlock(&qdev->gem.mutex);
+               /* this should unref the ttm bo */
+               drm_gem_object_unreference(&bo->gem_base);
+               mutex_unlock(&qdev->ddev->struct_mutex);
+       }
+}
+
+int qxl_bo_init(struct qxl_device *qdev)
+{
+       return qxl_ttm_init(qdev);
+}
+
+void qxl_bo_fini(struct qxl_device *qdev)
+{
+       qxl_ttm_fini(qdev);
+}
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
+{
+       int ret;
+       if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
+               /* allocate a surface id for this surface now */
+               ret = qxl_surface_id_alloc(qdev, bo);
+               if (ret)
+                       return ret;
+
+               ret = qxl_hw_surface_alloc(qdev, bo, NULL);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
+{
+       struct qxl_bo_list *entry, *sf;
+
+       list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
+               qxl_bo_unreserve(entry->bo);
+               list_del(&entry->lhead);
+               kfree(entry);
+       }
+}
+
+int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
+{
+       struct qxl_bo_list *entry;
+       int ret;
+
+       list_for_each_entry(entry, &reloc_list->bos, lhead) {
+               if (entry->bo == bo)
+                       return 0;
+       }
+
+       entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->bo = bo;
+       list_add(&entry->lhead, &reloc_list->bos);
+
+       ret = qxl_bo_reserve(bo, false);
+       if (ret)
+               return ret;
+
+       if (!bo->pin_count) {
+               qxl_ttm_placement_from_domain(bo, bo->type);
+               ret = ttm_bo_validate(&bo->tbo, &bo->placement,
+                                     true, false);
+               if (ret)
+                       return ret;
+       }
+
+       /* allocate a surface for reserved + validated buffers */
+       ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+       if (ret)
+               return ret;
+       return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
new file mode 100644 (file)
index 0000000..b4fd89f
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+#ifndef QXL_OBJECT_H
+#define QXL_OBJECT_H
+
+#include "qxl_drv.h"
+
+static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
+{
+       int r;
+
+       r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+       if (unlikely(r != 0)) {
+               if (r != -ERESTARTSYS) {
+                       struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+                       dev_err(qdev->dev, "%p reserve failed\n", bo);
+               }
+               return r;
+       }
+       return 0;
+}
+
+static inline void qxl_bo_unreserve(struct qxl_bo *bo)
+{
+       ttm_bo_unreserve(&bo->tbo);
+}
+
+static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
+{
+       return bo->tbo.offset;
+}
+
+static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
+{
+       return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
+{
+       return !!atomic_read(&bo->tbo.reserved);
+}
+
+static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
+{
+       return bo->tbo.addr_space_offset;
+}
+
+static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
+                             bool no_wait)
+{
+       int r;
+
+       r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+       if (unlikely(r != 0)) {
+               if (r != -ERESTARTSYS) {
+                       struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+                       dev_err(qdev->dev, "%p reserve failed for wait\n",
+                               bo);
+               }
+               return r;
+       }
+       spin_lock(&bo->tbo.bdev->fence_lock);
+       if (mem_type)
+               *mem_type = bo->tbo.mem.mem_type;
+       if (bo->tbo.sync_obj)
+               r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+       spin_unlock(&bo->tbo.bdev->fence_lock);
+       ttm_bo_unreserve(&bo->tbo);
+       return r;
+}
+
+extern int qxl_bo_create(struct qxl_device *qdev,
+                        unsigned long size,
+                        bool kernel, u32 domain,
+                        struct qxl_surface *surf,
+                        struct qxl_bo **bo_ptr);
+extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+extern void qxl_bo_kunmap(struct qxl_bo *bo);
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
+extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
+extern void qxl_bo_unref(struct qxl_bo **bo);
+extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
+extern int qxl_bo_unpin(struct qxl_bo *bo);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
+extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
+
+extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
+extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
+#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
new file mode 100644 (file)
index 0000000..c4267c7
--- /dev/null
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
+ * into 256 byte chunks for now - gives 16 cmds per page.
+ *
+ * use an ida to index into the chunks?
+ */
+/* manage releasables */
+/* stack them 16 high for now - a drawable object is 191 bytes */
+#define RELEASE_SIZE 256
+#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
+/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
+#define SURFACE_RELEASE_SIZE 128
+#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
+
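+/* indexed by release type: 0 drawable, 1 surface cmd, 2 cursor cmd */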
+static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
+static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
+uint64_t
+qxl_release_alloc(struct qxl_device *qdev, int type,
+                 struct qxl_release **ret)
+{
+       struct qxl_release *release;
+       int handle = 0;
+       size_t size = sizeof(*release);
+       int idr_ret;
+
+       release = kmalloc(size, GFP_KERNEL);
+       if (!release) {
+               DRM_ERROR("Out of memory\n");
+               return 0;
+       }
+       release->type = type;
+       release->bo_count = 0;
+       release->release_offset = 0;
+       release->surface_release_id = 0;
+again:
+       if (idr_pre_get(&qdev->release_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Out of memory for release idr\n");
+               kfree(release);
+               goto release_fail;
+       }
+       spin_lock(&qdev->release_idr_lock);
+       idr_ret = idr_get_new_above(&qdev->release_idr, release, 1, &handle);
+       spin_unlock(&qdev->release_idr_lock);
+       if (idr_ret == -EAGAIN)
+               goto again;
+       if (ret)
+               *ret = release;
+       QXL_INFO(qdev, "allocated release %d\n", handle);
+       release->id = handle;
+release_fail:
+
+       return handle;
+}
+
+void
+qxl_release_free(struct qxl_device *qdev,
+                struct qxl_release *release)
+{
+       int i;
+
+       QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
+                release->type, release->bo_count);
+
+       if (release->surface_release_id)
+               qxl_surface_id_dealloc(qdev, release->surface_release_id);
+
+       for (i = 0 ; i < release->bo_count; ++i) {
+               QXL_INFO(qdev, "release %llx\n",
+                       release->bos[i]->tbo.addr_space_offset
+                                               - DRM_FILE_OFFSET);
+               qxl_fence_remove_release(&release->bos[i]->fence, release->id);
+               qxl_bo_unref(&release->bos[i]);
+       }
+       spin_lock(&qdev->release_idr_lock);
+       idr_remove(&qdev->release_idr, release->id);
+       spin_unlock(&qdev->release_idr_lock);
+       kfree(release);
+}
+
+void
+qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
+                   struct qxl_bo *bo)
+{
+       int i;
+       for (i = 0; i < release->bo_count; i++)
+               if (release->bos[i] == bo)
+                       return;
+
+       if (release->bo_count >= QXL_MAX_RES) {
+               DRM_ERROR("exceeded max resource on a qxl_release item\n");
+               return;
+       }
+       release->bos[release->bo_count++] = qxl_bo_ref(bo);
+}
+
+static int qxl_release_bo_alloc(struct qxl_device *qdev,
+                               struct qxl_bo **bo)
+{
+       int ret;
+       ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
+                           bo);
+       return ret;
+}
+
+int qxl_release_reserve(struct qxl_device *qdev,
+                       struct qxl_release *release, bool no_wait)
+{
+       int ret;
+       if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
+               ret = qxl_bo_reserve(release->bos[0], no_wait);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+void qxl_release_unreserve(struct qxl_device *qdev,
+                         struct qxl_release *release)
+{
+       if (atomic_dec_and_test(&release->bos[0]->reserve_count))
+               qxl_bo_unreserve(release->bos[0]);
+}
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+                                      enum qxl_surface_cmd_type surface_cmd_type,
+                                      struct qxl_release *create_rel,
+                                      struct qxl_release **release)
+{
+       int ret;
+
+       if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
+               int idr_ret;
+               struct qxl_bo *bo;
+               union qxl_release_info *info;
+
+               /* stash the release after the create command */
+               idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
+               bo = qxl_bo_ref(create_rel->bos[0]);
+
+               (*release)->release_offset = create_rel->release_offset + 64;
+
+               qxl_release_add_res(qdev, *release, bo);
+
+               ret = qxl_release_reserve(qdev, *release, false);
+               if (ret) {
+                       DRM_ERROR("release reserve failed\n");
+                       goto out_unref;
+               }
+               info = qxl_release_map(qdev, *release);
+               info->id = idr_ret;
+               qxl_release_unmap(qdev, *release, info);
+
+
+out_unref:
+               qxl_bo_unref(&bo);
+               return ret;
+       }
+
+       return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
+                                        QXL_RELEASE_SURFACE_CMD, release, NULL);
+}
+
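+/*
+ * Suballocate a release from the current per-type release BO, allocating
+ * and pinning a fresh page-sized BO whenever the previous one is full,
+ * and return it reserved for the caller.
+ */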
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+                                      int type, struct qxl_release **release,
+                                      struct qxl_bo **rbo)
+{
+       struct qxl_bo *bo;
+       int idr_ret;
+       int ret;
+       union qxl_release_info *info;
+       int cur_idx;
+
+       if (type == QXL_RELEASE_DRAWABLE)
+               cur_idx = 0;
+       else if (type == QXL_RELEASE_SURFACE_CMD)
+               cur_idx = 1;
+       else if (type == QXL_RELEASE_CURSOR_CMD)
+               cur_idx = 2;
+       else {
+               DRM_ERROR("got illegal type: %d\n", type);
+               return -EINVAL;
+       }
+
+       idr_ret = qxl_release_alloc(qdev, type, release);
+
+       mutex_lock(&qdev->release_mutex);
+       if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
+               qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+               qdev->current_release_bo_offset[cur_idx] = 0;
+               qdev->current_release_bo[cur_idx] = NULL;
+       }
+       if (!qdev->current_release_bo[cur_idx]) {
+               ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
+               if (ret) {
+                       mutex_unlock(&qdev->release_mutex);
+                       return ret;
+               }
+
+               /* pin release BOs; they are too messy to evict */
+               ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
+               qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
+               qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
+       }
+
+       bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
+
+       (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
+       qdev->current_release_bo_offset[cur_idx]++;
+
+       if (rbo)
+               *rbo = bo;
+
+       qxl_release_add_res(qdev, *release, bo);
+
+       ret = qxl_release_reserve(qdev, *release, false);
+       mutex_unlock(&qdev->release_mutex);
+       if (ret)
+               goto out_unref;
+
+       info = qxl_release_map(qdev, *release);
+       info->id = idr_ret;
+       qxl_release_unmap(qdev, *release, info);
+
+out_unref:
+       qxl_bo_unref(&bo);
+       return ret;
+}
+
+int qxl_fence_releaseable(struct qxl_device *qdev,
+                         struct qxl_release *release)
+{
+       int i, ret;
+       for (i = 0; i < release->bo_count; i++) {
+               if (!release->bos[i]->tbo.sync_obj)
+                       release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
+               ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+                                                  uint64_t id)
+{
+       struct qxl_release *release;
+
+       spin_lock(&qdev->release_idr_lock);
+       release = idr_find(&qdev->release_idr, id);
+       spin_unlock(&qdev->release_idr_lock);
+       if (!release) {
+               DRM_ERROR("failed to find id in release_idr\n");
+               return NULL;
+       }
+       if (release->bo_count < 1) {
+               DRM_ERROR("read a released resource with 0 bos\n");
+               return NULL;
+       }
+       return release;
+}
+
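+/* atomically map the page of the release BO that holds this release's info */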
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+                                       struct qxl_release *release)
+{
+       void *ptr;
+       union qxl_release_info *info;
+       struct qxl_bo *bo = release->bos[0];
+
+       ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+       info = ptr + (release->release_offset & ~PAGE_SIZE);
+       return info;
+}
+
+void qxl_release_unmap(struct qxl_device *qdev,
+                      struct qxl_release *release,
+                      union qxl_release_info *info)
+{
+       struct qxl_bo *bo = release->bos[0];
+       void *ptr;
+
+       ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
+       qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
new file mode 100644 (file)
index 0000000..1a86242
--- /dev/null
@@ -0,0 +1,577 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/qxl_drm.h>
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/delay.h>
+static int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+
+static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
+{
+       struct qxl_mman *mman;
+       struct qxl_device *qdev;
+
+       mman = container_of(bdev, struct qxl_mman, bdev);
+       qdev = container_of(mman, struct qxl_device, mman);
+       return qdev;
+}
+
+static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+       return ttm_mem_global_init(ref->object);
+}
+
+static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+       ttm_mem_global_release(ref->object);
+}
+
+static int qxl_ttm_global_init(struct qxl_device *qdev)
+{
+       struct drm_global_reference *global_ref;
+       int r;
+
+       qdev->mman.mem_global_referenced = false;
+       global_ref = &qdev->mman.mem_global_ref;
+       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+       global_ref->size = sizeof(struct ttm_mem_global);
+       global_ref->init = &qxl_ttm_mem_global_init;
+       global_ref->release = &qxl_ttm_mem_global_release;
+
+       r = drm_global_item_ref(global_ref);
+       if (r != 0) {
+               DRM_ERROR("Failed setting up TTM memory accounting "
+                         "subsystem.\n");
+               return r;
+       }
+
+       qdev->mman.bo_global_ref.mem_glob =
+               qdev->mman.mem_global_ref.object;
+       global_ref = &qdev->mman.bo_global_ref.ref;
+       global_ref->global_type = DRM_GLOBAL_TTM_BO;
+       global_ref->size = sizeof(struct ttm_bo_global);
+       global_ref->init = &ttm_bo_global_init;
+       global_ref->release = &ttm_bo_global_release;
+       r = drm_global_item_ref(global_ref);
+       if (r != 0) {
+               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+               drm_global_item_unref(&qdev->mman.mem_global_ref);
+               return r;
+       }
+
+       qdev->mman.mem_global_referenced = true;
+       return 0;
+}
+
+static void qxl_ttm_global_fini(struct qxl_device *qdev)
+{
+       if (qdev->mman.mem_global_referenced) {
+               drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
+               drm_global_item_unref(&qdev->mman.mem_global_ref);
+               qdev->mman.mem_global_referenced = false;
+       }
+}
+
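+/*
+ * qxl wraps TTM's vm_ops so it can hook the fault handler; the copy is
+ * taken lazily on the first mmap and currently just forwards to TTM.
+ */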
+static struct vm_operations_struct qxl_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
+static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct ttm_buffer_object *bo;
+       struct qxl_device *qdev;
+       int r;
+
+       bo = (struct ttm_buffer_object *)vma->vm_private_data;
+       if (bo == NULL)
+               return VM_FAULT_NOPAGE;
+       qdev = qxl_get_qdev(bo->bdev);
+       r = ttm_vm_ops->fault(vma, vmf);
+       return r;
+}
+
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *file_priv;
+       struct qxl_device *qdev;
+       int r;
+
+       if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+               pr_info("%s: vma->vm_pgoff (%lu) < DRM_FILE_PAGE_OFFSET\n",
+                       __func__, vma->vm_pgoff);
+               return drm_mmap(filp, vma);
+       }
+
+       file_priv = filp->private_data;
+       qdev = file_priv->minor->dev->dev_private;
+       if (qdev == NULL) {
+               DRM_ERROR(
+                "filp->private_data->minor->dev->dev_private == NULL\n");
+               return -EINVAL;
+       }
+       QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
+                __func__, filp->private_data, vma->vm_pgoff);
+
+       r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
+       if (unlikely(r != 0))
+               return r;
+       if (unlikely(ttm_vm_ops == NULL)) {
+               ttm_vm_ops = vma->vm_ops;
+               qxl_ttm_vm_ops = *ttm_vm_ops;
+               qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
+       }
+       vma->vm_ops = &qxl_ttm_vm_ops;
+       return 0;
+}
+
+static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+       return 0;
+}
+
+static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+                            struct ttm_mem_type_manager *man)
+{
+       struct qxl_device *qdev;
+
+       qdev = qxl_get_qdev(bdev);
+
+       switch (type) {
+       case TTM_PL_SYSTEM:
+               /* System memory */
+               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_MASK_CACHING;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       case TTM_PL_VRAM:
+       case TTM_PL_PRIV0:
+               /* "On-card" video ram */
+               man->func = &ttm_bo_manager_func;
+               man->gpu_offset = 0;
+               man->flags = TTM_MEMTYPE_FLAG_FIXED |
+                            TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_MASK_CACHING;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       default:
+               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qxl_evict_flags(struct ttm_buffer_object *bo,
+                               struct ttm_placement *placement)
+{
+       struct qxl_bo *qbo;
+       static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+       if (!qxl_ttm_bo_is_qxl_bo(bo)) {
+               placement->fpfn = 0;
+               placement->lpfn = 0;
+               placement->placement = &placements;
+               placement->busy_placement = &placements;
+               placement->num_placement = 1;
+               placement->num_busy_placement = 1;
+               return;
+       }
+       qbo = container_of(bo, struct qxl_bo, tbo);
+       qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
+       *placement = qbo->placement;
+}
+
+static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+       return 0;
+}
+
+static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+                                 struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct qxl_device *qdev = qxl_get_qdev(bdev);
+
+       mem->bus.addr = NULL;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       mem->bus.is_iomem = false;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* system memory */
+               return 0;
+       case TTM_PL_VRAM:
+               mem->bus.is_iomem = true;
+               mem->bus.base = qdev->vram_base;
+               mem->bus.offset = mem->start << PAGE_SHIFT;
+               break;
+       case TTM_PL_PRIV0:
+               mem->bus.is_iomem = true;
+               mem->bus.base = qdev->surfaceram_base;
+               mem->bus.offset = mem->start << PAGE_SHIFT;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
+                               struct ttm_mem_reg *mem)
+{
+}
+
+/*
+ * TTM backend functions.
+ */
+struct qxl_ttm_tt {
+       struct ttm_dma_tt               ttm;
+       struct qxl_device               *qdev;
+       u64                             offset;
+};
+
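+/*
+ * qxl has no GTT-style aperture, so binding ttm_tt pages into device
+ * address space is not supported.
+ */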
+static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
+                               struct ttm_mem_reg *bo_mem)
+{
+       struct qxl_ttm_tt *gtt = (void *)ttm;
+
+       gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+       if (!ttm->num_pages) {
+               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+                    ttm->num_pages, bo_mem, ttm);
+       }
+       /* Not implemented */
+       return -1;
+}
+
+static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+       /* Not implemented */
+       return -1;
+}
+
+static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+       struct qxl_ttm_tt *gtt = (void *)ttm;
+
+       ttm_dma_tt_fini(&gtt->ttm);
+       kfree(gtt);
+}
+
+static struct ttm_backend_func qxl_backend_func = {
+       .bind = &qxl_ttm_backend_bind,
+       .unbind = &qxl_ttm_backend_unbind,
+       .destroy = &qxl_ttm_backend_destroy,
+};
+
+static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
+{
+       int r;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       r = ttm_pool_populate(ttm);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+       ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
+                                       unsigned long size, uint32_t page_flags,
+                                       struct page *dummy_read_page)
+{
+       struct qxl_device *qdev;
+       struct qxl_ttm_tt *gtt;
+
+       qdev = qxl_get_qdev(bdev);
+       gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
+       if (gtt == NULL)
+               return NULL;
+       gtt->ttm.ttm.func = &qxl_backend_func;
+       gtt->qdev = qdev;
+       if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
+                           dummy_read_page)) {
+               kfree(gtt);
+               return NULL;
+       }
+       return &gtt->ttm.ttm;
+}
+
+static void qxl_move_null(struct ttm_buffer_object *bo,
+                            struct ttm_mem_reg *new_mem)
+{
+       struct ttm_mem_reg *old_mem = &bo->mem;
+
+       BUG_ON(old_mem->mm_node != NULL);
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+}
+
+static int qxl_bo_move(struct ttm_buffer_object *bo,
+                      bool evict, bool interruptible,
+                      bool no_wait_gpu,
+                      struct ttm_mem_reg *new_mem)
+{
+       struct ttm_mem_reg *old_mem = &bo->mem;
+       if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+               qxl_move_null(bo, new_mem);
+               return 0;
+       }
+       return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+
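+/*
+ * TTM sync-object wait: poke the device (surface update, then OOM
+ * notifications) and run garbage collection until every release on this
+ * fence has completed, backing off between retries and giving up with
+ * -EBUSY only if drawable releases are still outstanding after many
+ * attempts.
+ */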
+static int qxl_sync_obj_wait(void *sync_obj,
+                            bool lazy, bool interruptible)
+{
+       struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
+       int count = 0, sc = 0;
+       struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+       if (qfence->num_active_releases == 0)
+               return 0;
+
+retry:
+       if (sc == 0) {
+               if (bo->type == QXL_GEM_DOMAIN_SURFACE)
+                       qxl_update_surface(qfence->qdev, bo);
+       } else if (sc >= 1) {
+               qxl_io_notify_oom(qfence->qdev);
+       }
+
+       sc++;
+
+       for (count = 0; count < 10; count++) {
+               bool ret;
+               ret = qxl_queue_garbage_collect(qfence->qdev, true);
+               if (ret == false)
+                       break;
+
+               if (qfence->num_active_releases == 0)
+                       return 0;
+       }
+
+       if (qfence->num_active_releases) {
+               bool have_drawable_releases = false;
+               void **slot;
+               struct radix_tree_iter iter;
+               int release_id;
+
+               radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) {
+                       struct qxl_release *release;
+
+                       release_id = iter.index;
+                       release = qxl_release_from_id_locked(qfence->qdev, release_id);
+                       if (release == NULL)
+                               continue;
+
+                       if (release->type == QXL_RELEASE_DRAWABLE)
+                               have_drawable_releases = true;
+               }
+
+               qxl_queue_garbage_collect(qfence->qdev, true);
+
+               if (have_drawable_releases || sc < 4) {
+                       if (sc > 2)
+                               /* back off */
+                               usleep_range(500, 1000);
+                       if (have_drawable_releases && sc > 300) {
+                               WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, qfence->num_active_releases);
+                               return -EBUSY;
+                       }
+                       goto retry;
+               }
+       }
+       return 0;
+}
+
+static int qxl_sync_obj_flush(void *sync_obj)
+{
+       return 0;
+}
+
+static void qxl_sync_obj_unref(void **sync_obj)
+{
+}
+
+static void *qxl_sync_obj_ref(void *sync_obj)
+{
+       return sync_obj;
+}
+
+static bool qxl_sync_obj_signaled(void *sync_obj)
+{
+       struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
+       return (qfence->num_active_releases == 0);
+}
+
+static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+                              struct ttm_mem_reg *new_mem)
+{
+       struct qxl_bo *qbo;
+       struct qxl_device *qdev;
+
+       if (!qxl_ttm_bo_is_qxl_bo(bo))
+               return;
+       qbo = container_of(bo, struct qxl_bo, tbo);
+       qdev = qbo->gem_base.dev->dev_private;
+
+       if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+               qxl_surface_evict(qdev, qbo, new_mem ? true : false);
+}
+
+static struct ttm_bo_driver qxl_bo_driver = {
+       .ttm_tt_create = &qxl_ttm_tt_create,
+       .ttm_tt_populate = &qxl_ttm_tt_populate,
+       .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
+       .invalidate_caches = &qxl_invalidate_caches,
+       .init_mem_type = &qxl_init_mem_type,
+       .evict_flags = &qxl_evict_flags,
+       .move = &qxl_bo_move,
+       .verify_access = &qxl_verify_access,
+       .io_mem_reserve = &qxl_ttm_io_mem_reserve,
+       .io_mem_free = &qxl_ttm_io_mem_free,
+       .sync_obj_signaled = &qxl_sync_obj_signaled,
+       .sync_obj_wait = &qxl_sync_obj_wait,
+       .sync_obj_flush = &qxl_sync_obj_flush,
+       .sync_obj_unref = &qxl_sync_obj_unref,
+       .sync_obj_ref = &qxl_sync_obj_ref,
+       .move_notify = &qxl_bo_move_notify,
+};
+
+
+
+int qxl_ttm_init(struct qxl_device *qdev)
+{
+       int r;
+       int num_io_pages; /* != rom->num_io_pages, we include surface0 */
+
+       r = qxl_ttm_global_init(qdev);
+       if (r)
+               return r;
+       /* No other users of the address space, so set it to 0 */
+       r = ttm_bo_device_init(&qdev->mman.bdev,
+                              qdev->mman.bo_global_ref.ref.object,
+                              &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
+       if (r) {
+               DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+               return r;
+       }
+       /* NOTE: this includes the framebuffer (aka surface 0) */
+       num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
+       r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
+                          num_io_pages);
+       if (r) {
+               DRM_ERROR("Failed initializing VRAM heap.\n");
+               return r;
+       }
+       r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+                          qdev->surfaceram_size / PAGE_SIZE);
+       if (r) {
+               DRM_ERROR("Failed initializing Surfaces heap.\n");
+               return r;
+       }
+       DRM_INFO("qxl: %uM of VRAM memory size\n",
+                (unsigned)qdev->vram_size / (1024 * 1024));
+       DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
+                ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+       if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
+               qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+       r = qxl_ttm_debugfs_init(qdev);
+       if (r) {
+               DRM_ERROR("Failed to init debugfs\n");
+               return r;
+       }
+       return 0;
+}
+
+void qxl_ttm_fini(struct qxl_device *qdev)
+{
+       ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+       ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+       ttm_bo_device_release(&qdev->mman.bdev);
+       qxl_ttm_global_fini(qdev);
+       DRM_INFO("qxl: ttm finalized\n");
+}
+
+
+#define QXL_DEBUGFS_MEM_TYPES 2
+
+#if defined(CONFIG_DEBUG_FS)
+static int qxl_mm_dump_table(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+       struct drm_device *dev = node->minor->dev;
+       struct qxl_device *rdev = dev->dev_private;
+       int ret;
+       struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+
+       spin_lock(&glob->lru_lock);
+       ret = drm_mm_dump_table(m, mm);
+       spin_unlock(&glob->lru_lock);
+       return ret;
+}
+#endif
+
+static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+{
+       static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
+       static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
+       unsigned i;
+
+       for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
+               if (i == 0)
+                       sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
+               else
+                       sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
+               qxl_mem_types_list[i].name = qxl_mem_types_names[i];
+               qxl_mem_types_list[i].show = &qxl_mm_dump_table;
+               qxl_mem_types_list[i].driver_features = 0;
+               if (i == 0)
+                       qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+               else
+                       qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+
+       }
+       return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
+}
index 8be35c809c7b612c5a33bc5271051273bd58cd92..af894584dd909f287dade021a50bef2bce233489 100644 (file)
@@ -86,6 +86,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
        mutex_lock(&man->io_reserve_mutex);
        return 0;
 }
+EXPORT_SYMBOL(ttm_mem_io_lock);
 
 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 {
@@ -94,6 +95,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 
        mutex_unlock(&man->io_reserve_mutex);
 }
+EXPORT_SYMBOL(ttm_mem_io_unlock);
 
 static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
 {
@@ -111,8 +113,9 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
        return 0;
 }
 
-static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-                             struct ttm_mem_reg *mem)
+
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                      struct ttm_mem_reg *mem)
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;
@@ -134,9 +137,10 @@ retry:
        }
        return ret;
 }
+EXPORT_SYMBOL(ttm_mem_io_reserve);
 
-static void ttm_mem_io_free(struct ttm_bo_device *bdev,
-                           struct ttm_mem_reg *mem)
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+                    struct ttm_mem_reg *mem)
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
@@ -149,6 +153,7 @@ static void ttm_mem_io_free(struct ttm_bo_device *bdev,
                bdev->driver->io_mem_free(bdev, mem);
 
 }
+EXPORT_SYMBOL(ttm_mem_io_free);
 
 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
 {
index 74705f329d992cb2a52b1f43f98ac0b4afe2155e..3df9f16b041cb214cc5570f890af2e892c25ac7e 100644 (file)
@@ -147,7 +147,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;
-       page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
+       page_last = vma_pages(vma) +
            bo->vm_node->start - vma->vm_pgoff;
 
        if (unlikely(page_offset >= bo->num_pages)) {
@@ -258,7 +258,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 
        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
-                                (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+                                vma_pages(vma));
        if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
                bo = NULL;
        read_unlock(&bdev->vm_lock);
index 512b01c04ea7610bcb6466208255a9d83a36cb1d..aa341d135867eea541c9ffdd7113f75641eef5c4 100644 (file)
@@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev)
                     hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
                        return true;
                break;
+       case USB_VENDOR_ID_ATMEL_V_USB:
+               /* The Masterkit MA901 usb radio is based on an Atmel tiny85
+                * chip and has the same USB ID as many Atmel V-USB devices.
+                * The radio is handled by the radio-ma901.c driver, so we
+                * want to ignore the hid. Check the name, bus and product
+                * and ignore it if it is the MA901 usb radio.
+                */
+               if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
+                       hdev->bus == BUS_USB &&
+                       strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
+                       return true;
+               break;
        }
 
        if (hdev->type == HID_TYPE_USBMOUSE &&
index 92e47e5c956426878e0e69d80811831d9b92e483..5309fd5eb0ebae7357044046f91ec9e0efe7e345 100644 (file)
 #define USB_VENDOR_ID_ATMEL            0x03eb
 #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
 #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER      0x2118
+#define USB_VENDOR_ID_ATMEL_V_USB      0x16c0
+#define USB_DEVICE_ID_ATMEL_V_USB      0x05df
 
 #define USB_VENDOR_ID_AUREAL           0x0755
 #define USB_DEVICE_ID_AUREAL_W01RN     0x2626
 #define USB_VENDOR_ID_MADCATZ          0x0738
 #define USB_DEVICE_ID_MADCATZ_BEATPAD  0x4540
 
-#define USB_VENDOR_ID_MASTERKIT                        0x16c0
-#define USB_DEVICE_ID_MASTERKIT_MA901RADIO     0x05df
-
 #define USB_VENDOR_ID_MCC              0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
 #define USB_DEVICE_ID_MCC_PMD1208LS    0x007a
 #define USB_VENDOR_ID_MONTEREY         0x0566
 #define USB_DEVICE_ID_GENIUS_KB29E     0x3004
 
+#define USB_VENDOR_ID_MSI              0x1770
+#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL     0xff00
+
 #define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
 #define USB_DEVICE_ID_N_S_HARMONY      0xc359
 
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001                0x3001
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008                0x3008
 
+#define USB_VENDOR_ID_REALTEK          0x0bda
+#define USB_DEVICE_ID_REALTEK_READER   0x0152
+
 #define USB_VENDOR_ID_ROCCAT           0x1e7d
 #define USB_DEVICE_ID_ROCCAT_ARVO      0x30d4
 #define USB_DEVICE_ID_ROCCAT_ISKU      0x319c
index f7f113ba083eb43fb1073d189a4979f7d4d6d724..a8ce44296cfddaf34a486a7c78edde4836cd3115 100644 (file)
@@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
        return 0;
 }
 
+static void magicmouse_input_configured(struct hid_device *hdev,
+               struct hid_input *hi)
+
+{
+       struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+       int ret = magicmouse_setup_input(msc->input, hdev);
+       if (ret) {
+               hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
+               /* clean msc->input to notify probe() of the failure */
+               msc->input = NULL;
+       }
+}
+
+
 static int magicmouse_probe(struct hid_device *hdev,
        const struct hid_device_id *id)
 {
@@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev,
                goto err_free;
        }
 
-       /* We do this after hid-input is done parsing reports so that
-        * hid-input uses the most natural button and axis IDs.
-        */
-       if (msc->input) {
-               ret = magicmouse_setup_input(msc->input, hdev);
-               if (ret) {
-                       hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
-                       goto err_stop_hw;
-               }
+       if (!msc->input) {
+               hid_err(hdev, "magicmouse input not registered\n");
+               ret = -ENOMEM;
+               goto err_stop_hw;
        }
 
        if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
@@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = {
        .remove = magicmouse_remove,
        .raw_event = magicmouse_raw_event,
        .input_mapping = magicmouse_input_mapping,
+       .input_configured = magicmouse_input_configured,
 };
 module_hid_driver(magicmouse_driver);
 
index 7a1ebb867cf499596e9c4fe5b1a43860b86d4351..82e9211b3ca97cfec8f316d33870f22f6fac0617 100644 (file)
@@ -621,6 +621,7 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
 {
        struct mt_device *td = hid_get_drvdata(hid);
        __s32 quirks = td->mtclass.quirks;
+       struct input_dev *input = field->hidinput->input;
 
        if (hid->claimed & HID_CLAIMED_INPUT) {
                switch (usage->hid) {
@@ -670,13 +671,16 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
                        break;
 
                default:
+                       if (usage->type)
+                               input_event(input, usage->type, usage->code,
+                                               value);
                        return;
                }
 
                if (usage->usage_index + 1 == field->report_count) {
                        /* we only take into account the last report. */
                        if (usage->hid == td->last_slot_field)
-                               mt_complete_slot(td, field->hidinput->input);
+                               mt_complete_slot(td, input);
 
                        if (field->index == td->last_field_index
                                && td->num_received >= td->num_expected)
index e0e6abf1cd3bd9b3cfa3d7cc73a2247c84c31be4..19b8360f2330ddec85856e09605a1ce69e32d880 100644 (file)
@@ -73,6 +73,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
@@ -80,6 +81,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
index 17ba4f8bc12d0b19ec7d380dc6677796d79f12de..70b1808a08f4d12bf9d1668cb5bae06d1cd2e202 100644 (file)
@@ -186,8 +186,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
                                          wq->rq.memsize, &(wq->rq.dma_addr),
                                          GFP_KERNEL);
-       if (!wq->rq.queue)
+       if (!wq->rq.queue) {
+               ret = -ENOMEM;
                goto free_sq;
+       }
        PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
                __func__, wq->sq.queue,
                (unsigned long long)virt_to_phys(wq->sq.queue),
index 439c35d4a669978aba8820151f2f6c98f5a47558..ea93870266eb7fda30ddf502b5d233fb1116423b 100644 (file)
@@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
                goto bail;
        }
 
-       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+       opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
        dev->opstats[opcode].n_bytes += tlen;
        dev->opstats[opcode].n_packets++;
 
index 8349f9c5064c042d568cf56619b76dd391410402..1e603a375069022bfed295f071f8c729b4ad826b 100644 (file)
@@ -1,7 +1,7 @@
 config INFINIBAND_QIB
-       tristate "QLogic PCIe HCA support"
+       tristate "Intel PCIe HCA support"
        depends on 64BIT
        ---help---
-       This is a low-level driver for QLogic PCIe QLE InfiniBand host
-       channel adapters.  This driver does not support the QLogic
+       This is a low-level driver for Intel PCIe QLE InfiniBand host
+       channel adapters.  This driver does not support the Intel
        HyperTransport card (model QHT7140).
index 5423edcab51f4404905e47674957cb32e5f33b9f..216092477dfcf7eb4a51187ffe825110c5eb3286 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
                 "Attempt pre-IBTA 1.2 DDR speed negotiation");
 
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("QLogic <support@qlogic.com>");
-MODULE_DESCRIPTION("QLogic IB driver");
+MODULE_AUTHOR("Intel <ibsupport@intel.com>");
+MODULE_DESCRIPTION("Intel IB driver");
 MODULE_VERSION(QIB_DRIVER_VERSION);
 
 /*
index a099ac171e226f1073f4a70ccb680e669b2ff048..0232ae56b1fa2b185e1a551e074e5d256f15c087 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
  * All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64);
 
 /*
  * This file contains all the chip-specific register information and
- * access functions for the QLogic QLogic_IB PCI-Express chip.
+ * access functions for the Intel Intel_IB PCI-Express chip.
  *
  */
 
index 50e33aa0b4e39628c5de95e314d57de68bbc4be5..173f805790da4522f3f4a8881b8b2df5883b9b4b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
 static void qib_remove_one(struct pci_dev *);
 static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
 
-#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
+#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
 #define PFX QIB_DRV_NAME ": "
 
 static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
@@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                dd = qib_init_iba6120_funcs(pdev, ent);
 #else
                qib_early_err(&pdev->dev,
-                       "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
+                       "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
                        ent->device);
                dd = ERR_PTR(-ENODEV);
 #endif
@@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        default:
                qib_early_err(&pdev->dev,
-                       "Failing on unknown QLogic deviceid 0x%x\n",
+                       "Failing on unknown Intel deviceid 0x%x\n",
                        ent->device);
                ret = -ENODEV;
        }
index 50a8a0d4fe676f5467c677d66a4985170900059d..08a6c6d39e5666228cd842b1597fc5d9dcf48052 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -44,7 +44,7 @@
 #include "qib.h"
 #include "qib_7220.h"
 
-#define SD7220_FW_NAME "qlogic/sd7220.fw"
+#define SD7220_FW_NAME "intel/sd7220.fw"
 MODULE_FIRMWARE(SD7220_FW_NAME);
 
 /*
index ba51a4715a1dcdd5963261d84497dc1e6974c070..7c0ab16a2fe230fc31b44fd43b04f3e92d2a26b8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
        ibdev->dma_ops = &qib_dma_mapping_ops;
 
        snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
-                "QLogic Infiniband HCA %s", init_utsname()->nodename);
+                "Intel Infiniband HCA %s", init_utsname()->nodename);
 
        ret = ib_register_device(ibdev, qib_create_port_files);
        if (ret)
index 67b0c1d23678d26565981cf9815993027c0a1b5e..1ef880de3a41d97fe328951069f3b6c30066daf1 100644 (file)
@@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                if (++priv->tx_outstanding == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                  tx->qp->qp_num);
-                       if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
-                               ipoib_warn(priv, "request notify on send CQ failed\n");
                        netif_stop_queue(dev);
+                       rc = ib_req_notify_cq(priv->send_cq,
+                               IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
+                       if (rc < 0)
+                               ipoib_warn(priv, "request notify on send CQ failed\n");
+                       else if (rc)
+                               ipoib_send_comp_handler(priv->send_cq, dev);
                }
        }
 }
index 5c514d0711d1b57669b6acee551400bf88d44265..c332fb98480d28e42739e9b674b7b99ce8be427d 100644 (file)
@@ -130,7 +130,7 @@ config IRQ_REMAP
 # OMAP IOMMU support
 config OMAP_IOMMU
        bool "OMAP IOMMU Support"
-       depends on ARCH_OMAP
+       depends on ARCH_OMAP2PLUS
        select IOMMU_API
 
 config OMAP_IOVMM
index 98f555dafb55c8edc32393919ee48140d024b184..b287ca33833df792350baad6f82fefbe1494e038 100644 (file)
@@ -2466,18 +2466,16 @@ static int device_change_notifier(struct notifier_block *nb,
 
                /* allocate a protection domain if a device is added */
                dma_domain = find_protection_domain(devid);
-               if (dma_domain)
-                       goto out;
-               dma_domain = dma_ops_domain_alloc();
-               if (!dma_domain)
-                       goto out;
-               dma_domain->target_dev = devid;
-
-               spin_lock_irqsave(&iommu_pd_list_lock, flags);
-               list_add_tail(&dma_domain->list, &iommu_pd_list);
-               spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
-
-               dev_data = get_dev_data(dev);
+               if (!dma_domain) {
+                       dma_domain = dma_ops_domain_alloc();
+                       if (!dma_domain)
+                               goto out;
+                       dma_domain->target_dev = devid;
+
+                       spin_lock_irqsave(&iommu_pd_list_lock, flags);
+                       list_add_tail(&dma_domain->list, &iommu_pd_list);
+                       spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+               }
 
                dev->archdata.dma_ops = &amd_iommu_dma_ops;
 
index b6ecddb63cd0fc9397ccf24e0951dbf13ea9a719..e3c2d74b7684596790fd0293461dc74922a850ba 100644 (file)
@@ -980,7 +980,7 @@ static void __init free_iommu_all(void)
  *     BIOS should disable L2B micellaneous clock gating by setting
  *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
  */
-static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
 {
        u32 value;
 
index d56f8c17c5fe4dcc2e4db23085d4914b977c0fd0..7c11ff368d07f88dc357dd3f6e22c9fbe2ed6cc1 100644 (file)
@@ -2,7 +2,6 @@
 #include <linux/cpumask.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/cpumask.h>
 #include <linux/errno.h>
 #include <linux/msi.h>
 #include <linux/irq.h>
index d4e7567b367c04d25b9a21ba4c50d0db314596aa..0b899cb6cda1904b6f07cdb924f94be3d76ccb56 100644 (file)
@@ -724,7 +724,7 @@ static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
        if (enable) {
                if (is_code(code, M5MOLS_RESTYPE_MONITOR))
                        ret = m5mols_start_monitor(info);
-               if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
+               else if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
                        ret = m5mols_start_capture(info);
                else
                        ret = -EINVAL;
index ccd18e4ee7892e62034a0d9b1e351107fb787749..54579e4c740b4da7f8590ad345145553d63d8062 100644 (file)
@@ -250,17 +250,19 @@ static u8 SRAM_Table[][60] =
    vdelay      start of active video in 2 * field lines relative to
                trailing edge of /VRESET pulse (VDELAY register).
    sheight     height of active video in 2 * field lines.
+   extraheight Added to sheight for cropcap.bounds.height only
    videostart0 ITU-R frame line number of the line corresponding
                to vdelay in the first field. */
 #define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth,     \
-               vdelay, sheight, videostart0)                            \
+               vdelay, sheight, extraheight, videostart0)               \
        .cropcap.bounds.left = minhdelayx1,                              \
        /* * 2 because vertically we count field lines times two, */     \
        /* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */           \
        .cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \
        /* 4 is a safety margin at the end of the line. */               \
        .cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4,        \
-       .cropcap.bounds.height = (sheight) + (vdelay) - MIN_VDELAY,      \
+       .cropcap.bounds.height = (sheight) + (extraheight) + (vdelay) -  \
+                                MIN_VDELAY,                             \
        .cropcap.defrect.left = hdelayx1,                                \
        .cropcap.defrect.top = (videostart0) * 2,                        \
        .cropcap.defrect.width = swidth,                                 \
@@ -301,9 +303,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* totalwidth */ 1135,
                        /* sqwidth */ 944,
                        /* vdelay */ 0x20,
-               /* bt878 (and bt848?) can capture another
-                  line below active video. */
-                       /* sheight */ (576 + 2) + 0x20 - 2,
+                       /* sheight */ 576,
+                       /* bt878 (and bt848?) can capture another
+                          line below active video. */
+                       /* extraheight */ 2,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
@@ -330,6 +333,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 780,
                        /* vdelay */ 0x1a,
                        /* sheight */ 480,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_SECAM,
@@ -355,6 +359,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 944,
                        /* vdelay */ 0x20,
                        /* sheight */ 576,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_PAL_Nc,
@@ -380,6 +385,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 780,
                        /* vdelay */ 0x1a,
                        /* sheight */ 576,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_PAL_M,
@@ -405,6 +411,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 780,
                        /* vdelay */ 0x1a,
                        /* sheight */ 480,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_PAL_N,
@@ -430,6 +437,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 944,
                        /* vdelay */ 0x20,
                        /* sheight */ 576,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_NTSC_M_JP,
@@ -455,6 +463,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 780,
                        /* vdelay */ 0x16,
                        /* sheight */ 480,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                /* that one hopefully works with the strange timing
@@ -484,6 +493,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 944,
                        /* vdelay */ 0x1a,
                        /* sheight */ 480,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        }
 };
index 05d7b6333461c68f5aaeb919ad1c9dc584cf5952..a0639e77997357d96422568422e2ef28c12a2772 100644 (file)
@@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
 
 config VIDEO_SH_VEU
        tristate "SuperH VEU mem2mem video processing driver"
-       depends on VIDEO_DEV && VIDEO_V4L2
+       depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
        help
index 82d9f6ac12f3a271481e106df7f75017b3f962c1..33b5ffc8d66dfe136e2cd0df4129963b6c60ad79 100644 (file)
@@ -1054,16 +1054,18 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc)
 
 static int gsc_m2m_resume(struct gsc_dev *gsc)
 {
+       struct gsc_ctx *ctx;
        unsigned long flags;
 
        spin_lock_irqsave(&gsc->slock, flags);
        /* Clear for full H/W setup in first run after resume */
+       ctx = gsc->m2m.ctx;
        gsc->m2m.ctx = NULL;
        spin_unlock_irqrestore(&gsc->slock, flags);
 
        if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
-               gsc_m2m_job_finish(gsc->m2m.ctx,
-                                   VB2_BUF_STATE_ERROR);
+               gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
        return 0;
 }
 
@@ -1204,7 +1206,7 @@ static int gsc_resume(struct device *dev)
        /* Do not resume if the device was idle before system suspend */
        spin_lock_irqsave(&gsc->slock, flags);
        if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) ||
-           !gsc_m2m_active(gsc)) {
+           !gsc_m2m_opened(gsc)) {
                spin_unlock_irqrestore(&gsc->slock, flags);
                return 0;
        }
index e3916bde45cf13a4cb92b8bcd83715b1c1a07ce0..0f513dd19f86de105f36913a6d5e6e59839fd91a 100644 (file)
@@ -850,16 +850,18 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc)
 
 static int fimc_m2m_resume(struct fimc_dev *fimc)
 {
+       struct fimc_ctx *ctx;
        unsigned long flags;
 
        spin_lock_irqsave(&fimc->slock, flags);
        /* Clear for full H/W setup in first run after resume */
+       ctx = fimc->m2m.ctx;
        fimc->m2m.ctx = NULL;
        spin_unlock_irqrestore(&fimc->slock, flags);
 
        if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state))
-               fimc_m2m_job_finish(fimc->m2m.ctx,
-                                   VB2_BUF_STATE_ERROR);
+               fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
        return 0;
 }
 
index f0af0754a7b46a04cee5f41552ede4d8bc2e96ed..ac9663ce2a4927a0a3249bde33107fb1a17a9783 100644 (file)
@@ -128,10 +128,10 @@ static const u32 src_pixfmt_map[8][3] = {
 void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
 {
        enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code;
-       unsigned int i = ARRAY_SIZE(src_pixfmt_map);
+       int i = ARRAY_SIZE(src_pixfmt_map);
        u32 cfg;
 
-       while (i-- >= 0) {
+       while (--i >= 0) {
                if (src_pixfmt_map[i][0] == pixelcode)
                        break;
        }
@@ -224,9 +224,9 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
                { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
        };
        u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
-       unsigned int i = ARRAY_SIZE(pixcode);
+       int i = ARRAY_SIZE(pixcode);
 
-       while (i-- >= 0)
+       while (--i >= 0)
                if (pixcode[i][0] == dev->fmt->mbus_code)
                        break;
        cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
index bfc4206935c85a93fa75b6893e3afdcc8efb7c92..bbc35de7db278fe053c6de4d02be7fc7c40d4321 100644 (file)
@@ -1408,6 +1408,7 @@ static const struct v4l2_ctrl_config fimc_lite_ctrl = {
        .id     = V4L2_CTRL_CLASS_USER | 0x1001,
        .type   = V4L2_CTRL_TYPE_BOOLEAN,
        .name   = "Test Pattern 640x480",
+       .step   = 1,
 };
 
 static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
index a17fcb2d5d413ca66ed9a16a31a34045a83a0d6b..cd38d708ab584ef4487d4d1960aabe06d6b7138b 100644 (file)
@@ -827,7 +827,7 @@ static int fimc_md_link_notify(struct media_pad *source,
        struct fimc_pipeline *pipeline;
        struct v4l2_subdev *sd;
        struct mutex *lock;
-       int ret = 0;
+       int i, ret = 0;
        int ref_count;
 
        if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
@@ -854,29 +854,28 @@ static int fimc_md_link_notify(struct media_pad *source,
                return 0;
        }
 
+       mutex_lock(lock);
+       ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
+
        if (!(flags & MEDIA_LNK_FL_ENABLED)) {
-               int i;
-               mutex_lock(lock);
-               ret = __fimc_pipeline_close(pipeline);
+               if (ref_count > 0) {
+                       ret = __fimc_pipeline_close(pipeline);
+                       if (!ret && fimc)
+                               fimc_ctrls_delete(fimc->vid_cap.ctx);
+               }
                for (i = 0; i < IDX_MAX; i++)
                        pipeline->subdevs[i] = NULL;
-               if (fimc)
-                       fimc_ctrls_delete(fimc->vid_cap.ctx);
-               mutex_unlock(lock);
-               return ret;
+       } else if (ref_count > 0) {
+               /*
+                * Link activation. Enable power of pipeline elements only if
+                * the pipeline is already in use, i.e. its video node is open.
+                * Recreate the controls destroyed during the link deactivation.
+                */
+               ret = __fimc_pipeline_open(pipeline,
+                                          source->entity, true);
+               if (!ret && fimc)
+                       ret = fimc_capture_ctrls_create(fimc);
        }
-       /*
-        * Link activation. Enable power of pipeline elements only if the
-        * pipeline is already in use, i.e. its video node is opened.
-        * Recreate the controls destroyed during the link deactivation.
-        */
-       mutex_lock(lock);
-
-       ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
-       if (ref_count > 0)
-               ret = __fimc_pipeline_open(pipeline, source->entity, true);
-       if (!ret && fimc)
-               ret = fimc_capture_ctrls_create(fimc);
 
        mutex_unlock(lock);
        return ret ? -EPIPE : ret;
index e84703c314ce0d50a9861cf674244b9a3342615f..1cb6d57987c6f2dd06e0ff8d8965d0c02179039e 100644 (file)
@@ -276,7 +276,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
        unsigned int frame_type;
 
        dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
-       frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
+       frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx);
 
        /* If frame is same as previous then skip and do not dequeue */
        if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
index 2356fd52a169abf2680dd22ebb9e17453c4ac52d..4f6b553c4b2de90f6686f2da38cf5221f68c352d 100644 (file)
@@ -232,6 +232,7 @@ static struct mfc_control controls[] = {
                .minimum = 0,
                .maximum = 1,
                .default_value = 0,
+               .step = 1,
                .menu_skip_mask = 0,
        },
        {
index c61f590029ad7c05ef75911c69e5fdbe50f24e8e..348dafc0318aae60de8d18660334cb5244e2dd98 100644 (file)
@@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev)
 static int usb_ma901radio_probe(struct usb_interface *intf,
                                const struct usb_device_id *id)
 {
+       struct usb_device *dev = interface_to_usbdev(intf);
        struct ma901radio_device *radio;
        int retval = 0;
 
+       /* Masterkit MA901 usb radio has the same USB ID as many other
+        * Atmel V-USB devices. Let's make additional checks to be sure
+        * that this is our device.
+        */
+
+       if (dev->product && dev->manufacturer &&
+               (strncmp(dev->product, "MA901", 5) != 0
+               || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0))
+               return -ENODEV;
+
        radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL);
        if (!radio) {
                dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n");
index 19f3563c61da651673b5ea7884b43b8e4c2145e1..5a79c333d45e66308e5b0f75cbc91ed9d60ce329 100644 (file)
@@ -291,7 +291,7 @@ config IR_TTUSBIR
 
 config IR_RX51
        tristate "Nokia N900 IR transmitter diode"
-       depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM
+       depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM
        ---help---
           Say Y or M here if you want to enable support for the IR
           transmitter diode built in the Nokia N900 (RX51) device.
index a9d355230e8eb2efb52fb3fbdff2a5b8297a7a73..768aaf62d5dc300a7b2d7e150265e27035ef15cb 100644 (file)
@@ -10,7 +10,7 @@ ifeq ($(CONFIG_COMPAT),y)
   videodev-objs += v4l2-compat-ioctl32.o
 endif
 
-obj-$(CONFIG_VIDEO_DEV) += videodev.o
+obj-$(CONFIG_VIDEO_V4L2) += videodev.o
 obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
 obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
 
index 45ea7185c003d5b5deee66ad1a274c23212ed77f..642c6223fa6cd177f22c66e40b3d3531a6d841a4 100644 (file)
@@ -151,6 +151,20 @@ static void mei_me_intr_disable(struct mei_device *dev)
        mei_hcsr_set(hw, hcsr);
 }
 
+/**
+ * mei_me_hw_reset_release - release device from the reset
+ *
+ * @dev: the device structure
+ */
+static void mei_me_hw_reset_release(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       u32 hcsr = mei_hcsr_read(hw);
+
+       hcsr |= H_IG;
+       hcsr &= ~H_RST;
+       mei_hcsr_set(hw, hcsr);
+}
 /**
  * mei_me_hw_reset - resets fw via mei csr register.
  *
@@ -169,18 +183,14 @@ static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
        if (intr_enable)
                hcsr |= H_IE;
        else
-               hcsr &= ~H_IE;
-
-       mei_hcsr_set(hw, hcsr);
-
-       hcsr = mei_hcsr_read(hw) | H_IG;
-       hcsr &= ~H_RST;
+               hcsr |= ~H_IE;
 
        mei_hcsr_set(hw, hcsr);
 
-       hcsr = mei_hcsr_read(hw);
+       if (dev->dev_state == MEI_DEV_POWER_DOWN)
+               mei_me_hw_reset_release(dev);
 
-       dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr);
+       dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw));
 }
 
 /**
@@ -466,7 +476,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
                        mutex_unlock(&dev->device_lock);
                        return IRQ_HANDLED;
                } else {
-                       dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+                       dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
+                       mei_me_hw_reset_release(dev);
                        mutex_unlock(&dev->device_lock);
                        return IRQ_HANDLED;
                }
index 6ec530168afbbe2268da06d1c2508fd044cb943d..356179991a2e9ff87f7e34efb452b5ad4a547aa6 100644 (file)
@@ -183,6 +183,24 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
        mei_cl_all_write_clear(dev);
 }
 
+void mei_stop(struct mei_device *dev)
+{
+       dev_dbg(&dev->pdev->dev, "stopping the device.\n");
+
+       mutex_lock(&dev->device_lock);
+
+       cancel_delayed_work(&dev->timer_work);
+
+       mei_wd_stop(dev);
+
+       dev->dev_state = MEI_DEV_POWER_DOWN;
+       mei_reset(dev, 0);
+
+       mutex_unlock(&dev->device_lock);
+
+       flush_scheduled_work();
+}
+
 
 
 
index cb80166161f0f82ad725e1073baf1bc607fcc6bf..97873812e33b28c06e85dc000869ef48f1312471 100644 (file)
@@ -381,6 +381,7 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
 void mei_device_init(struct mei_device *dev);
 void mei_reset(struct mei_device *dev, int interrupts);
 int mei_hw_init(struct mei_device *dev);
+void mei_stop(struct mei_device *dev);
 
 /*
  *  MEI interrupt functions prototype
index b40ec0601ab0bcc7325f0dfedb8ebefccdd896ec..b8b5c9c3ad0375bab4605b038bcbda60ff970b4f 100644 (file)
@@ -247,44 +247,14 @@ static void mei_remove(struct pci_dev *pdev)
 
        hw = to_me_hw(dev);
 
-       mutex_lock(&dev->device_lock);
-
-       cancel_delayed_work(&dev->timer_work);
 
-       mei_wd_stop(dev);
+       dev_err(&pdev->dev, "stop\n");
+       mei_stop(dev);
 
        mei_pdev = NULL;
 
-       if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
-               dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
-               mei_cl_disconnect(&dev->iamthif_cl);
-       }
-       if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
-               dev->wd_cl.state = MEI_FILE_DISCONNECTING;
-               mei_cl_disconnect(&dev->wd_cl);
-       }
-
-       /* Unregistering watchdog device */
        mei_watchdog_unregister(dev);
 
-       /* remove entry if already in list */
-       dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
-
-       if (dev->open_handle_count > 0)
-               dev->open_handle_count--;
-       mei_cl_unlink(&dev->wd_cl);
-
-       if (dev->open_handle_count > 0)
-               dev->open_handle_count--;
-       mei_cl_unlink(&dev->iamthif_cl);
-
-       dev->iamthif_current_cb = NULL;
-       dev->me_clients_num = 0;
-
-       mutex_unlock(&dev->device_lock);
-
-       flush_scheduled_work();
-
        /* disable interrupts */
        mei_disable_interrupts(dev);
 
@@ -308,28 +278,20 @@ static int mei_pci_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev = pci_get_drvdata(pdev);
-       int err;
 
        if (!dev)
                return -ENODEV;
-       mutex_lock(&dev->device_lock);
 
-       cancel_delayed_work(&dev->timer_work);
+       dev_err(&pdev->dev, "suspend\n");
 
-       /* Stop watchdog if exists */
-       err = mei_wd_stop(dev);
-       /* Set new mei state */
-       if (dev->dev_state == MEI_DEV_ENABLED ||
-           dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
-               dev->dev_state = MEI_DEV_POWER_DOWN;
-               mei_reset(dev, 0);
-       }
-       mutex_unlock(&dev->device_lock);
+       mei_stop(dev);
+
+       mei_disable_interrupts(dev);
 
        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);
 
-       return err;
+       return 0;
 }
 
 static int mei_pci_resume(struct device *device)
index ed5c433cd4936f8b52c98352bbf21ead11a67ae8..f3cdd904fe4d6e1ae5c48288858d793dbc0af22c 100644 (file)
@@ -42,9 +42,11 @@ struct datagram_entry {
 
 struct delayed_datagram_info {
        struct datagram_entry *entry;
-       struct vmci_datagram msg;
        struct work_struct work;
        bool in_dg_host_queue;
+       /* msg and msg_payload must be together. */
+       struct vmci_datagram msg;
+       u8 msg_payload[];
 };
 
 /* Number of in-flight host->host datagrams */
index 6bbd90e1123c59c58329505537568a2ec2d5b9b0..a51241b2e62186b03b4958faa509c4cefc14d052 100644 (file)
@@ -1976,12 +1976,11 @@ static int __bond_release_one(struct net_device *bond_dev,
                return -EINVAL;
        }
 
+       write_unlock_bh(&bond->lock);
        /* unregister rx_handler early so bond_handle_frame wouldn't be called
         * for this slave anymore.
         */
        netdev_rx_handler_unregister(slave_dev);
-       write_unlock_bh(&bond->lock);
-       synchronize_net();
        write_lock_bh(&bond->lock);
 
        if (!all && !bond->params.fail_over_mac) {
index 1c9e09fbdff8346e99413e0313d03ed401f463e2..ea7a388f484306710a33375f3a553fd1ecc5b621 100644 (file)
@@ -183,6 +183,11 @@ int bond_create_slave_symlinks(struct net_device *master,
        sprintf(linkname, "slave_%s", slave->name);
        ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
                                linkname);
+
+       /* free the master link created earlier in case of error */
+       if (ret)
+               sysfs_remove_link(&(slave->dev.kobj), "master");
+
        return ret;
 
 }
@@ -522,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
                goto out;
        }
        if (new_value < 0) {
-               pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
+               pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
                       bond->dev->name, new_value, INT_MAX);
                ret = -EINVAL;
                goto out;
@@ -537,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
        pr_info("%s: Setting ARP monitoring interval to %d.\n",
                bond->dev->name, new_value);
        bond->params.arp_interval = new_value;
-       if (bond->params.miimon) {
-               pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
-                       bond->dev->name, bond->dev->name);
-               bond->params.miimon = 0;
-       }
-       if (!bond->params.arp_targets[0]) {
-               pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
-                       bond->dev->name);
+       if (new_value) {
+               if (bond->params.miimon) {
+                       pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+                               bond->dev->name, bond->dev->name);
+                       bond->params.miimon = 0;
+               }
+               if (!bond->params.arp_targets[0])
+                       pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+                               bond->dev->name);
        }
        if (bond->dev->flags & IFF_UP) {
                /* If the interface is up, we may need to fire off
@@ -552,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
                 * timer will get fired off when the open function
                 * is called.
                 */
-               cancel_delayed_work_sync(&bond->mii_work);
-               queue_delayed_work(bond->wq, &bond->arp_work, 0);
+               if (!new_value) {
+                       cancel_delayed_work_sync(&bond->arp_work);
+               } else {
+                       cancel_delayed_work_sync(&bond->mii_work);
+                       queue_delayed_work(bond->wq, &bond->arp_work, 0);
+               }
        }
-
 out:
        rtnl_unlock();
        return ret;
@@ -697,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
        }
        if (new_value < 0) {
                pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
-                      bond->dev->name, new_value, 1, INT_MAX);
+                      bond->dev->name, new_value, 0, INT_MAX);
                ret = -EINVAL;
                goto out;
        } else {
@@ -752,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d,
                goto out;
        }
        if (new_value < 0) {
-               pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
-                      bond->dev->name, new_value, 1, INT_MAX);
+               pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
+                      bond->dev->name, new_value, 0, INT_MAX);
                ret = -EINVAL;
                goto out;
        } else {
@@ -963,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d,
        }
        if (new_value < 0) {
                pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
-                      bond->dev->name, new_value, 1, INT_MAX);
+                      bond->dev->name, new_value, 0, INT_MAX);
                ret = -EINVAL;
                goto out;
-       } else {
-               pr_info("%s: Setting MII monitoring interval to %d.\n",
-                       bond->dev->name, new_value);
-               bond->params.miimon = new_value;
-               if (bond->params.updelay)
-                       pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
-                               bond->dev->name,
-                               bond->params.updelay * bond->params.miimon);
-               if (bond->params.downdelay)
-                       pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
-                               bond->dev->name,
-                               bond->params.downdelay * bond->params.miimon);
-               if (bond->params.arp_interval) {
-                       pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
-                               bond->dev->name);
-                       bond->params.arp_interval = 0;
-                       if (bond->params.arp_validate) {
-                               bond->params.arp_validate =
-                                       BOND_ARP_VALIDATE_NONE;
-                       }
-               }
-
-               if (bond->dev->flags & IFF_UP) {
-                       /* If the interface is up, we may need to fire off
-                        * the MII timer. If the interface is down, the
-                        * timer will get fired off when the open function
-                        * is called.
-                        */
+       }
+       pr_info("%s: Setting MII monitoring interval to %d.\n",
+               bond->dev->name, new_value);
+       bond->params.miimon = new_value;
+       if (bond->params.updelay)
+               pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+                       bond->dev->name,
+                       bond->params.updelay * bond->params.miimon);
+       if (bond->params.downdelay)
+               pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+                       bond->dev->name,
+                       bond->params.downdelay * bond->params.miimon);
+       if (new_value && bond->params.arp_interval) {
+               pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+                       bond->dev->name);
+               bond->params.arp_interval = 0;
+               if (bond->params.arp_validate)
+                       bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+       }
+       if (bond->dev->flags & IFF_UP) {
+               /* If the interface is up, we may need to fire off
+                * the MII timer. If the interface is down, the
+                * timer will get fired off when the open function
+                * is called.
+                */
+               if (!new_value) {
+                       cancel_delayed_work_sync(&bond->mii_work);
+               } else {
                        cancel_delayed_work_sync(&bond->arp_work);
                        queue_delayed_work(bond->wq, &bond->mii_work, 0);
                }
index b39ca5b3ea7faee34c2264ea7fbc6ac14e3caf35..ff2ba86cd4a495cdd29020f02561a1f31926ff11 100644 (file)
@@ -46,6 +46,7 @@ config CAN_EMS_PCI
 config CAN_PEAK_PCMCIA
        tristate "PEAK PCAN-PC Card"
        depends on PCMCIA
+       depends on HAS_IOPORT
        ---help---
          This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
          from PEAK-System (http://www.peak-system.com). To compile this
index a042cdc260dc720f982c44ae1bf8da315c0faaf6..3c18d7d000edaabe3b8ec93f1d2841d3300df087 100644 (file)
@@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
         */
        if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
            REG_CR_BASICCAN_INITIAL &&
-           (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+           (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
            (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
                flag = 1;
 
@@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
         * See states on p. 23 of the Datasheet.
         */
        if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
-           priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
+           priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
            priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
                return flag;
 
index daf4013a8fc720ca0c73a9df96b647ad5c314839..e4df307eaa9081b9e39a81731714e0bd3969ea3b 100644 (file)
@@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
         */
        spin_lock_irqsave(&priv->cmdreg_lock, flags);
        priv->write_reg(priv, REG_CMR, val);
-       priv->read_reg(priv, REG_SR);
+       priv->read_reg(priv, SJA1000_REG_SR);
        spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
 }
 
@@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
 
        while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
                n++;
-               status = priv->read_reg(priv, REG_SR);
+               status = priv->read_reg(priv, SJA1000_REG_SR);
                /* check for absent controller due to hw unplug */
                if (status == 0xFF && sja1000_is_absent(priv))
                        return IRQ_NONE;
@@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
                        /* receive interrupt */
                        while (status & SR_RBS) {
                                sja1000_rx(dev);
-                               status = priv->read_reg(priv, REG_SR);
+                               status = priv->read_reg(priv, SJA1000_REG_SR);
                                /* check for absent controller */
                                if (status == 0xFF && sja1000_is_absent(priv))
                                        return IRQ_NONE;
index afa99847a5101ffa6399726b57d4189f6c3057f5..aa48e053da27ce26ed394e8a4584ef9805828372 100644 (file)
@@ -56,7 +56,7 @@
 /* SJA1000 registers - manual section 6.4 (Pelican Mode) */
 #define REG_MOD                0x00
 #define REG_CMR                0x01
-#define REG_SR         0x02
+#define SJA1000_REG_SR         0x02
 #define REG_IR         0x03
 #define REG_IER                0x04
 #define REG_ALC                0x0B
index 829b5ad71d0da0a4634cd211bc3f5ffc67215db6..edfdf6b950e73ad82f8c1e3749a973c113237bb7 100644 (file)
@@ -438,7 +438,6 @@ struct atl1e_adapter {
        struct atl1e_hw        hw;
        struct atl1e_hw_stats  hw_stats;
 
-       bool have_msi;
        u32 wol;
        u16 link_speed;
        u16 link_duplex;
index 92f4734f860d3f18ece977946b4562c028df35e6..f73d5609439ac8d054b7c3303bf0baadfa576877 100644 (file)
@@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
 
        free_irq(adapter->pdev->irq, netdev);
-
-       if (adapter->have_msi)
-               pci_disable_msi(adapter->pdev);
 }
 
 static int atl1e_request_irq(struct atl1e_adapter *adapter)
 {
        struct pci_dev    *pdev   = adapter->pdev;
        struct net_device *netdev = adapter->netdev;
-       int flags = 0;
        int err = 0;
 
-       adapter->have_msi = true;
-       err = pci_enable_msi(pdev);
-       if (err) {
-               netdev_dbg(netdev,
-                          "Unable to allocate MSI interrupt Error: %d\n", err);
-               adapter->have_msi = false;
-       }
-
-       if (!adapter->have_msi)
-               flags |= IRQF_SHARED;
-       err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
+       err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
+                         netdev);
        if (err) {
                netdev_dbg(adapter->netdev,
                           "Unable to allocate interrupt Error: %d\n", err);
-               if (adapter->have_msi)
-                       pci_disable_msi(pdev);
                return err;
        }
        netdev_dbg(netdev, "atl1e_request_irq OK\n");
index 568205436a15f4d5b37528eb155df6e00ee3f07c..91ecd6a00d05a77453e4311eb5741064ba03ded2 100644 (file)
@@ -2139,12 +2139,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
                        break;
                default:
                        BNX2X_ERR("Non valid capability ID\n");
-                       rval = -EINVAL;
+                       rval = 1;
                        break;
                }
        } else {
                DP(BNX2X_MSG_DCB, "DCB disabled\n");
-               rval = -EINVAL;
+               rval = 1;
        }
 
        DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
@@ -2170,12 +2170,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
                        break;
                default:
                        BNX2X_ERR("Non valid TC-ID\n");
-                       rval = -EINVAL;
+                       rval = 1;
                        break;
                }
        } else {
                DP(BNX2X_MSG_DCB, "DCB disabled\n");
-               rval = -EINVAL;
+               rval = 1;
        }
 
        return rval;
@@ -2188,7 +2188,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
        return -EINVAL;
 }
 
-static u8  bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
 {
        struct bnx2x *bp = netdev_priv(netdev);
        DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
@@ -2390,12 +2390,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
                        break;
                default:
                        BNX2X_ERR("Non valid featrue-ID\n");
-                       rval = -EINVAL;
+                       rval = 1;
                        break;
                }
        } else {
                DP(BNX2X_MSG_DCB, "DCB disabled\n");
-               rval = -EINVAL;
+               rval = 1;
        }
 
        return rval;
@@ -2431,12 +2431,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
                        break;
                default:
                        BNX2X_ERR("Non valid featrue-ID\n");
-                       rval = -EINVAL;
+                       rval = 1;
                        break;
                }
        } else {
                DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
-               rval = -EINVAL;
+               rval = 1;
        }
 
        return rval;
index 67d2663b3974aeaf03acfeb9500704ee5659ab8a..17a972734ba746d9218df4bf025218a91ebee947 100644 (file)
@@ -14604,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp)
                if (j + len > block_end)
                        goto partno;
 
-               memcpy(tp->fw_ver, &vpd_data[j], len);
-               strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
+               if (len >= sizeof(tp->fw_ver))
+                       len = sizeof(tp->fw_ver) - 1;
+               memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+               snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
+                        &vpd_data[j]);
        }
 
 partno:
index a170065b597382ed9408239d98c36cec98a535a2..b0ebc9f6d55e9844e1ebe9367e9340681cba2a72 100644 (file)
 #define XGMAC_FLOW_CTRL_FCB_BPA        0x00000001      /* Flow Control Busy ... */
 
 /* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMTIM   0x00800000      /* PMT Interrupt Mask */
 #define XGMAC_INT_STAT_PMT     0x0080          /* PMT Interrupt Status */
 #define XGMAC_INT_STAT_LPI     0x0040          /* LPI Interrupt Status */
 
@@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev)
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
 
+       /* Mask power mgt interrupt */
+       writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
+
        /* XGMAC requires AXI bus init. This is a 'magic number' for now */
        writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
 
@@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
                struct sk_buff *skb;
                int frame_len;
 
+               if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
+                       break;
+
                entry = priv->rx_tail;
                p = priv->dma_rx + entry;
                if (desc_get_owner(p))
@@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
        unsigned int pmt = 0;
 
        if (mode & WAKE_MAGIC)
-               pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+               pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
        if (mode & WAKE_UCAST)
                pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
 
index 8cdf02503d13d5bdd52348f77995c0ce47618f1c..9eada8e86078fe83981223e023bd45f3929b1a70 100644 (file)
@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
                tmp = readl(reg);
 }
 
+/*
+ * Sleep, either by using msleep() or if we are suspending, then
+ * use mdelay() to sleep.
+ */
+static void dm9000_msleep(board_info_t *db, unsigned int ms)
+{
+       if (db->in_suspend)
+               mdelay(ms);
+       else
+               msleep(ms);
+}
+
+/* Read a word from phyxcer */
+static int
+dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+{
+       board_info_t *db = netdev_priv(dev);
+       unsigned long flags;
+       unsigned int reg_save;
+       int ret;
+
+       mutex_lock(&db->addr_lock);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       /* Save previous register address */
+       reg_save = readb(db->io_addr);
+
+       /* Fill the phyxcer register into REG_0C */
+       iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+       /* Issue phyxcer read command */
+       iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
+
+       writeb(reg_save, db->io_addr);
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       dm9000_msleep(db, 1);           /* Wait read complete */
+
+       spin_lock_irqsave(&db->lock, flags);
+       reg_save = readb(db->io_addr);
+
+       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer read command */
+
+       /* The read data keeps on REG_0D & REG_0E */
+       ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+
+       /* restore the previous address */
+       writeb(reg_save, db->io_addr);
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       mutex_unlock(&db->addr_lock);
+
+       dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+       return ret;
+}
+
+/* Write a word to phyxcer */
+static void
+dm9000_phy_write(struct net_device *dev,
+                int phyaddr_unused, int reg, int value)
+{
+       board_info_t *db = netdev_priv(dev);
+       unsigned long flags;
+       unsigned long reg_save;
+
+       dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+       mutex_lock(&db->addr_lock);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       /* Save previous register address */
+       reg_save = readb(db->io_addr);
+
+       /* Fill the phyxcer register into REG_0C */
+       iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+       /* Fill the written data into REG_0D & REG_0E */
+       iow(db, DM9000_EPDRL, value);
+       iow(db, DM9000_EPDRH, value >> 8);
+
+       /* Issue phyxcer write command */
+       iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
+
+       writeb(reg_save, db->io_addr);
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       dm9000_msleep(db, 1);           /* Wait write complete */
+
+       spin_lock_irqsave(&db->lock, flags);
+       reg_save = readb(db->io_addr);
+
+       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer write command */
+
+       /* restore the previous address */
+       writeb(reg_save, db->io_addr);
+
+       spin_unlock_irqrestore(&db->lock, flags);
+       mutex_unlock(&db->addr_lock);
+}
+
 /* dm9000_set_io
  *
  * select the specified set of io routines to use with the
@@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev)
 
        iow(db, DM9000_GPCR, GPCR_GEP_CNTL);    /* Let GPIO0 output */
 
+       dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
+       dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+
        ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
 
        /* if wol is needed, then always set NCR_WAKEEN otherwise we end
@@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev)
        return 0;
 }
 
-/*
- * Sleep, either by using msleep() or if we are suspending, then
- * use mdelay() to sleep.
- */
-static void dm9000_msleep(board_info_t *db, unsigned int ms)
-{
-       if (db->in_suspend)
-               mdelay(ms);
-       else
-               msleep(ms);
-}
-
-/*
- *   Read a word from phyxcer
- */
-static int
-dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
-{
-       board_info_t *db = netdev_priv(dev);
-       unsigned long flags;
-       unsigned int reg_save;
-       int ret;
-
-       mutex_lock(&db->addr_lock);
-
-       spin_lock_irqsave(&db->lock,flags);
-
-       /* Save previous register address */
-       reg_save = readb(db->io_addr);
-
-       /* Fill the phyxcer register into REG_0C */
-       iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
-       iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);   /* Issue phyxcer read command */
-
-       writeb(reg_save, db->io_addr);
-       spin_unlock_irqrestore(&db->lock,flags);
-
-       dm9000_msleep(db, 1);           /* Wait read complete */
-
-       spin_lock_irqsave(&db->lock,flags);
-       reg_save = readb(db->io_addr);
-
-       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer read command */
-
-       /* The read data keeps on REG_0D & REG_0E */
-       ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
-
-       /* restore the previous address */
-       writeb(reg_save, db->io_addr);
-       spin_unlock_irqrestore(&db->lock,flags);
-
-       mutex_unlock(&db->addr_lock);
-
-       dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
-       return ret;
-}
-
-/*
- *   Write a word to phyxcer
- */
-static void
-dm9000_phy_write(struct net_device *dev,
-                int phyaddr_unused, int reg, int value)
-{
-       board_info_t *db = netdev_priv(dev);
-       unsigned long flags;
-       unsigned long reg_save;
-
-       dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
-       mutex_lock(&db->addr_lock);
-
-       spin_lock_irqsave(&db->lock,flags);
-
-       /* Save previous register address */
-       reg_save = readb(db->io_addr);
-
-       /* Fill the phyxcer register into REG_0C */
-       iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
-       /* Fill the written data into REG_0D & REG_0E */
-       iow(db, DM9000_EPDRL, value);
-       iow(db, DM9000_EPDRH, value >> 8);
-
-       iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);   /* Issue phyxcer write command */
-
-       writeb(reg_save, db->io_addr);
-       spin_unlock_irqrestore(&db->lock, flags);
-
-       dm9000_msleep(db, 1);           /* Wait write complete */
-
-       spin_lock_irqsave(&db->lock,flags);
-       reg_save = readb(db->io_addr);
-
-       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer write command */
-
-       /* restore the previous address */
-       writeb(reg_save, db->io_addr);
-
-       spin_unlock_irqrestore(&db->lock, flags);
-       mutex_unlock(&db->addr_lock);
-}
-
 static void
 dm9000_shutdown(struct net_device *dev)
 {
@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
        db->flags |= DM9000_PLATF_SIMPLE_PHY;
 #endif
 
-       dm9000_reset(db);
+       /* Fix a bug in dm9000_probe: instead of calling dm9000_reset(db),
+        * set the NCR_MAC_LBK bit, which is needed to keep the DM9000
+        * FIFO stable during the probe stage.
+        */
+
+       iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
 
        /* try multiple times, DM9000 sometimes gets the read wrong */
        for (i = 0; i < 8; i++) {
index 55688bd1a3ef0bb83ed90391c33bce894a16da89..9ce058adababcf28c83f644819b0b501079ef898 100644 (file)
@@ -69,7 +69,9 @@
 #define NCR_WAKEEN          (1<<6)
 #define NCR_FCOL            (1<<4)
 #define NCR_FDX             (1<<3)
-#define NCR_LBK             (3<<1)
+
+#define NCR_RESERVED        (3<<1)
+#define NCR_MAC_LBK         (1<<1)
 #define NCR_RST                    (1<<0)
 
 #define NSR_SPEED           (1<<7)
 #define ISR_LNKCHNG            (1<<5)
 #define ISR_UNDERRUN           (1<<4)
 
+/* Davicom MII registers.
+ */
+
+#define MII_DM_DSPCR           0x1b    /* DSP Control Register */
+
+#define DSPCR_INIT_PARAM       0xE100  /* DSP init parameter */
+
 #endif /* _DM9000X_H_ */
 
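The dm9000 hunks above replace the dm9000_reset() call in probe with a direct NCR write and split the old NCR_LBK field into NCR_RESERVED/NCR_MAC_LBK. A minimal hedged sketch of the new probe-time sequence, reusing the driver's own iow() accessor and board_info_t type; the 1 ms settle delay is an assumption, not taken from the patch:

	/* Hedged sketch only: reset the chip with MAC loopback enabled so the
	 * FIFO is stable while the device IDs are read back during probe.
	 * iow(), board_info_t and the NCR_* bits are the driver's own. */
	static void dm9000_probe_reset(board_info_t *db)
	{
		iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
		mdelay(1);	/* assumed settle time for the soft reset */
	}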
index e3f39372ce25f93b63f93021a68e4dc4bc7294fb..f292c3aa423fbdeabb83ebdafa3b1699e988b19a 100644 (file)
@@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 }
 
+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+       struct fec_enet_private *fep = netdev_priv(dev);
+       struct bufdesc *bdp;
+       unsigned int i;
+
+       /* Initialize the receive buffer descriptors. */
+       bdp = fep->rx_bd_base;
+       for (i = 0; i < RX_RING_SIZE; i++) {
+
+               /* Initialize the BD for every fragment in the page. */
+               if (bdp->cbd_bufaddr)
+                       bdp->cbd_sc = BD_ENET_RX_EMPTY;
+               else
+                       bdp->cbd_sc = 0;
+               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+       }
+
+       /* Set the last buffer to wrap */
+       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+       bdp->cbd_sc |= BD_SC_WRAP;
+
+       fep->cur_rx = fep->rx_bd_base;
+
+       /* ...and the same for transmit */
+       bdp = fep->tx_bd_base;
+       fep->cur_tx = bdp;
+       for (i = 0; i < TX_RING_SIZE; i++) {
+
+               /* Initialize the BD for every fragment in the page. */
+               bdp->cbd_sc = 0;
+               if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+                       dev_kfree_skb_any(fep->tx_skbuff[i]);
+                       fep->tx_skbuff[i] = NULL;
+               }
+               bdp->cbd_bufaddr = 0;
+               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+       }
+
+       /* Set the last buffer to wrap */
+       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+       bdp->cbd_sc |= BD_SC_WRAP;
+       fep->dirty_tx = bdp;
+}
+
 /* This function is called to start or restart the FEC during a link
  * change.  This only happens when switching between half and full
  * duplex.
@@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex)
        /* Set maximum receive buffer size. */
        writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
 
+       fec_enet_bd_init(ndev);
+
        /* Set receive and transmit descriptor base. */
        writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
        if (fep->bufdesc_ex)
@@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex)
                writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
                        * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
-       fep->cur_rx = fep->rx_bd_base;
 
        for (i = 0; i <= TX_RING_MOD_MASK; i++) {
                if (fep->tx_skbuff[i]) {
@@ -1332,7 +1380,7 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 static void fec_enet_free_buffers(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       int i;
+       unsigned int i;
        struct sk_buff *skb;
        struct bufdesc  *bdp;
 
@@ -1356,7 +1404,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 static int fec_enet_alloc_buffers(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       int i;
+       unsigned int i;
        struct sk_buff *skb;
        struct bufdesc  *bdp;
 
@@ -1597,8 +1645,6 @@ static int fec_enet_init(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc *cbd_base;
-       struct bufdesc *bdp;
-       int i;
 
        /* Allocate memory for buffer descriptors. */
        cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1608,6 +1654,7 @@ static int fec_enet_init(struct net_device *ndev)
                return -ENOMEM;
        }
 
+       memset(cbd_base, 0, PAGE_SIZE);
        spin_lock_init(&fep->hw_lock);
 
        fep->netdev = ndev;
@@ -1631,35 +1678,6 @@ static int fec_enet_init(struct net_device *ndev)
        writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
        netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
 
-       /* Initialize the receive buffer descriptors. */
-       bdp = fep->rx_bd_base;
-       for (i = 0; i < RX_RING_SIZE; i++) {
-
-               /* Initialize the BD for every fragment in the page. */
-               bdp->cbd_sc = 0;
-               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-       }
-
-       /* Set the last buffer to wrap */
-       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-       bdp->cbd_sc |= BD_SC_WRAP;
-
-       /* ...and the same for transmit */
-       bdp = fep->tx_bd_base;
-       fep->cur_tx = bdp;
-       for (i = 0; i < TX_RING_SIZE; i++) {
-
-               /* Initialize the BD for every fragment in the page. */
-               bdp->cbd_sc = 0;
-               bdp->cbd_bufaddr = 0;
-               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-       }
-
-       /* Set the last buffer to wrap */
-       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-       bdp->cbd_sc |= BD_SC_WRAP;
-       fep->dirty_tx = bdp;
-
        fec_restart(ndev, 0);
 
        return 0;
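The new fec_enet_bd_init() re-initializes both descriptor rings on every fec_restart(); the essential detail is that the final descriptor carries the wrap bit so the controller loops back to the ring base. A self-contained model of that walk is sketched below; the struct and constants are stand-ins, only the wrap-bit idea is taken from the hunk:

	#include <string.h>

	#define RING_SIZE   8
	#define BD_SC_WRAP  0x2000	/* last descriptor wraps to the base */
	#define BD_RX_EMPTY 0x8000	/* descriptor owned by the hardware */

	struct bufdesc { unsigned short cbd_sc; unsigned long cbd_bufaddr; };

	/* Hedged model: clear the ring, mark usable RX entries empty, and
	 * close the ring by setting the wrap bit on the last descriptor. */
	static void ring_init(struct bufdesc *ring, int has_buffers)
	{
		memset(ring, 0, sizeof(*ring) * RING_SIZE);
		for (int i = 0; i < RING_SIZE; i++)
			ring[i].cbd_sc = has_buffers ? BD_RX_EMPTY : 0;
		ring[RING_SIZE - 1].cbd_sc |= BD_SC_WRAP;
	}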
index 1f17ca0f22019d8350af2affdcc6b00739ce966f..0d8df400a4798c06a1345773b9a0f821b4ab7cb2 100644 (file)
@@ -128,6 +128,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
 
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 }
+EXPORT_SYMBOL(fec_ptp_start_cyclecounter);
 
 /**
  * fec_ptp_adjfreq - adjust ptp cycle frequency
@@ -318,6 +319,7 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
            -EFAULT : 0;
 }
+EXPORT_SYMBOL(fec_ptp_ioctl);
 
 /**
  * fec_time_keep - call timecounter_read every second to avoid timer overrun
@@ -383,3 +385,4 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
                pr_info("registered PHC device on %s\n", ndev->name);
        }
 }
+EXPORT_SYMBOL(fec_ptp_init);
index 43462d596a4e5b04050793d66e26b0172a4f78de..ffd287196bf875560e40e5330c68e0a84b50e930 100644 (file)
@@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                txdr->buffer_info[i].dma =
                        dma_map_single(&pdev->dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);
+               if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
+                       ret_val = 4;
+                       goto err_nomem;
+               }
                tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
                tx_desc->lower.data = cpu_to_le32(skb->len);
                tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
        rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
                                    GFP_KERNEL);
        if (!rxdr->buffer_info) {
-               ret_val = 4;
+               ret_val = 5;
                goto err_nomem;
        }
 
@@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
                                        GFP_KERNEL);
        if (!rxdr->desc) {
-               ret_val = 5;
+               ret_val = 6;
                goto err_nomem;
        }
        memset(rxdr->desc, 0, rxdr->size);
@@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
                skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
                if (!skb) {
-                       ret_val = 6;
+                       ret_val = 7;
                        goto err_nomem;
                }
                skb_reserve(skb, NET_IP_ALIGN);
@@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                rxdr->buffer_info[i].dma =
                        dma_map_single(&pdev->dev, skb->data,
                                       E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
+                       ret_val = 8;
+                       goto err_nomem;
+               }
                rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
                memset(skb->data, 0x00, skb->len);
        }
index 948b86ffa4f027753eec3f5b57b45b90277947ab..7e615e2bf7e6a4e96971945d33e3e332301e88bb 100644 (file)
@@ -848,11 +848,16 @@ check_page:
                        }
                }
 
-               if (!buffer_info->dma)
+               if (!buffer_info->dma) {
                        buffer_info->dma = dma_map_page(&pdev->dev,
                                                        buffer_info->page, 0,
                                                        PAGE_SIZE,
                                                        DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+                               adapter->alloc_rx_buff_failed++;
+                               break;
+                       }
+               }
 
                rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
                rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
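The e1000 hunks above (and the similar ixgb one further down) add the same guard each time: a streaming mapping returned by dma_map_single()/dma_map_page() must be checked with dma_mapping_error() before the address is handed to hardware. A hedged in-kernel sketch of that pattern; the helper name and error handling are illustrative, only the dma-mapping calls are the real API:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	/* Hedged sketch: map an rx skb, then validate the mapping before use. */
	static int map_rx_skb(struct device *dev, struct sk_buff *skb,
			      size_t len, dma_addr_t *dma)
	{
		*dma = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *dma))
			return -ENOMEM;	/* caller bumps its rx-alloc-failed counter */
		return 0;
	}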
index b64542acfa3449bda2ee5c793d83de6cda1f097c..12b1d84808084269971e5f83f55bd8b0f3fb8240 100644 (file)
@@ -1818,27 +1818,32 @@ out:
  **/
 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
 {
-       u32 dtxswc;
+       u32 reg_val, reg_offset;
 
        switch (hw->mac.type) {
        case e1000_82576:
+               reg_offset = E1000_DTXSWC;
+               break;
        case e1000_i350:
-               dtxswc = rd32(E1000_DTXSWC);
-               if (enable) {
-                       dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
-                                  E1000_DTXSWC_VLAN_SPOOF_MASK);
-                       /* The PF can spoof - it has to in order to
-                        * support emulation mode NICs */
-                       dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
-               } else {
-                       dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
-                                   E1000_DTXSWC_VLAN_SPOOF_MASK);
-               }
-               wr32(E1000_DTXSWC, dtxswc);
+               reg_offset = E1000_TXSWC;
                break;
        default:
-               break;
+               return;
+       }
+
+       reg_val = rd32(reg_offset);
+       if (enable) {
+               reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+                            E1000_DTXSWC_VLAN_SPOOF_MASK);
+               /* The PF can spoof - it has to in order to
+                * support emulation mode NICs
+                */
+               reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+       } else {
+               reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+                            E1000_DTXSWC_VLAN_SPOOF_MASK);
        }
+       wr32(reg_offset, reg_val);
 }
 
 /**
index 4623502054d5347b2723811fe1cdca0d1e93ae0e..0478a1abe54110d1374c6d77d950f6a4d7bf0327 100644 (file)
@@ -39,7 +39,7 @@
 #include <linux/pci.h>
 
 #ifdef CONFIG_IGB_HWMON
-struct i2c_board_info i350_sensor_info = {
+static struct i2c_board_info i350_sensor_info = {
        I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
 };
 
index 4dbd62968c7a18090a61029249c04f6ee3b1335e..8496adfc6a685580f6ec1c50b86f0fed62b2b121 100644 (file)
@@ -2542,8 +2542,8 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
        if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
                return;
 
-       igb_enable_sriov(pdev, max_vfs);
        pci_sriov_set_totalvfs(pdev, 7);
+       igb_enable_sriov(pdev, max_vfs);
 
 #endif /* CONFIG_PCI_IOV */
 }
@@ -2652,7 +2652,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
                if (max_vfs > 7) {
                        dev_warn(&pdev->dev,
                                 "Maximum of 7 VFs per PF, using max\n");
-                       adapter->vfs_allocated_count = 7;
+                       max_vfs = adapter->vfs_allocated_count = 7;
                } else
                        adapter->vfs_allocated_count = max_vfs;
                if (adapter->vfs_allocated_count)
index 0987822359f00590d7f36bc5e126c8a4f20ee1ac..0a237507ee85008e9e11c703b07f58730202e072 100644 (file)
@@ -740,7 +740,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
        case e1000_82576:
                snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
                adapter->ptp_caps.owner = THIS_MODULE;
-               adapter->ptp_caps.max_adj = 1000000000;
+               adapter->ptp_caps.max_adj = 999999881;
                adapter->ptp_caps.n_ext_ts = 0;
                adapter->ptp_caps.pps = 0;
                adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
index ea48083734359e52b229f592e84abf7654a82523..b5f94abe3cffc9c406f8e2aaa27ed9cd77ad6542 100644 (file)
@@ -2159,6 +2159,10 @@ map_skb:
                                                  skb->data,
                                                  adapter->rx_buffer_len,
                                                  DMA_FROM_DEVICE);
+               if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+                       adapter->alloc_rx_buff_failed++;
+                       break;
+               }
 
                rx_desc = IXGB_RX_DESC(*rx_ring, i);
                rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2168,7 +2172,8 @@ map_skb:
                rx_desc->status = 0;
 
 
-               if (++i == rx_ring->count) i = 0;
+               if (++i == rx_ring->count)
+                       i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }
 
index c3db6cd69b68b0135f42908fd738bd7c9fd4fddc..2b6cb5ca48eefe8f0a075b1166f24dff7aaaaa83 100644 (file)
@@ -944,9 +944,17 @@ free_queue_irqs:
                free_irq(adapter->msix_entries[vector].vector,
                         adapter->q_vector[vector]);
        }
-       pci_disable_msix(adapter->pdev);
-       kfree(adapter->msix_entries);
-       adapter->msix_entries = NULL;
+       /* This failure is non-recoverable - it indicates the system is
+        * out of MSIX vector resources and the VF driver cannot run
+        * without them.  Set the number of MSIX vectors to zero to
+        * indicate that not enough could be allocated.  The error is
+        * returned to the user, indicating that opening the device
+        * failed.  Any further attempt to force the driver open will
+        * also fail.  The only way to recover is to unload and reload
+        * the driver; if the system has recovered some MSIX vectors by
+        * then, the open may succeed.
+        */
+       adapter->num_msix_vectors = 0;
        return err;
 }
 
@@ -2572,6 +2580,15 @@ static int ixgbevf_open(struct net_device *netdev)
        struct ixgbe_hw *hw = &adapter->hw;
        int err;
 
+       /* A previous failure to open the device because of a lack of
+        * available MSIX vector resources may have reset the
+        * num_msix_vectors variable to zero.  The only way to recover
+        * is to unload/reload the driver and hope that the system has
+        * been able to recover some MSIX vector resources.
+        */
+       if (!adapter->num_msix_vectors)
+               return -ENOMEM;
+
        /* disallow open during test */
        if (test_bit(__IXGBEVF_TESTING, &adapter->state))
                return -EBUSY;
@@ -2628,7 +2645,6 @@ static int ixgbevf_open(struct net_device *netdev)
 
 err_req_irq:
        ixgbevf_down(adapter);
-       ixgbevf_free_irq(adapter);
 err_setup_rx:
        ixgbevf_free_all_rx_resources(adapter);
 err_setup_tx:
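The two ixgbevf hunks work as a pair: a failed MSI-X IRQ request latches the condition by zeroing adapter->num_msix_vectors, and ixgbevf_open() refuses to run while that count is zero. A self-contained model of the latch-and-gate idea; all names here are stand-ins, not the driver's:

	#include <errno.h>

	struct vf_adapter { int num_msix_vectors; };

	/* Latch an unrecoverable vector shortage. */
	static int request_vectors(struct vf_adapter *a, int wanted, int granted)
	{
		if (granted < wanted) {
			a->num_msix_vectors = 0;
			return -ENOMEM;
		}
		a->num_msix_vectors = granted;
		return 0;
	}

	/* Refuse to open until the driver is reloaded and vectors return. */
	static int vf_open(struct vf_adapter *a)
	{
		return a->num_msix_vectors ? 0 : -ENOMEM;
	}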
index 6a2127489af78e718264f202737ba0667770ba1b..bfdb06860397e720a848d6e8cab7b324ded6ea1c 100644 (file)
@@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev)
        return 0;
 
 err_free:
-       kfree(dev);
+       free_netdev(dev);
 err_out:
        return err;
 }
index fc07ca35721b29ba07dba97bf6e52dc74f446534..6a0e671fcecd6e399952056646130578191ba57d 100644 (file)
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
                sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
                sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
 
-               tp = space - 2048/8;
+               tp = space - 8192/8;
                sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
                sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
        } else {
index 615ac63ea8603a802896d2bd810d4c2ac874204c..ec6dcd80152bdd46550b0aaf0865d00a01261a93 100644 (file)
@@ -2074,7 +2074,7 @@ enum {
        GM_IS_RX_FF_OR  = 1<<1, /* Receive FIFO Overrun */
        GM_IS_RX_COMPL  = 1<<0, /* Frame Reception Complete */
 
-#define GMAC_DEF_MSK     GM_IS_TX_FF_UR
+#define GMAC_DEF_MSK     (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
 };
 
 /*     GMAC_LINK_CTRL  16 bit  GMAC Link Control Reg (YUKON only) */
index 995d4b6d5c1e924a870f3da67c6ce1b543b99a83..30d78f806dc3735dd3d8a4aa0c9a21a867976cd2 100644 (file)
@@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
 {
-       unsigned int i;
-       for (i = ETH_ALEN - 1; i; --i) {
+       int i;
+       for (i = ETH_ALEN - 1; i >= 0; --i) {
                dst_mac[i] = src_mac & 0xff;
                src_mac >>= 8;
        }
@@ -1637,6 +1637,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
        /* Flush multicast filter */
        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
 
+       /* Remove flow steering rules for the port */
+       if (mdev->dev->caps.steering_mode ==
+           MLX4_STEERING_MODE_DEVICE_MANAGED) {
+               ASSERT_RTNL();
+               list_for_each_entry_safe(flow, tmp_flow,
+                                        &priv->ethtool_list, list) {
+                       mlx4_flow_detach(mdev->dev, flow->id);
+                       list_del(&flow->list);
+               }
+       }
+
        mlx4_en_destroy_drop_qp(priv);
 
        /* Free TX Rings */
@@ -1657,17 +1668,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
        if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
                mdev->mac_removed[priv->port] = 1;
 
-       /* Remove flow steering rules for the port*/
-       if (mdev->dev->caps.steering_mode ==
-           MLX4_STEERING_MODE_DEVICE_MANAGED) {
-               ASSERT_RTNL();
-               list_for_each_entry_safe(flow, tmp_flow,
-                                        &priv->ethtool_list, list) {
-                       mlx4_flow_detach(mdev->dev, flow->id);
-                       list_del(&flow->list);
-               }
-       }
-
        /* Free RX Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
index 251ae2f9311680cb69eaada07cf3ad8a3cf58354..8e3123a1df886de6b0afb2c60397ecd28be3e4b9 100644 (file)
@@ -771,7 +771,7 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
        struct mlx4_slave_event_eq_info *event_eq =
                priv->mfunc.master.slave_state[slave].event_eq;
        u32 in_modifier = vhcr->in_modifier;
-       u32 eqn = in_modifier & 0x1FF;
+       u32 eqn = in_modifier & 0x3FF;
        u64 in_param =  vhcr->in_param;
        int err = 0;
        int i;
index 2995687f1aee3692ef5c164cff02870607f76224..1391b52f443aa5489ec31ee7e155b087f0372b0c 100644 (file)
@@ -99,6 +99,7 @@ struct res_qp {
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
+       atomic_t                ref_count;
 };
 
 enum res_mtt_states {
@@ -197,6 +198,7 @@ enum res_fs_rule_states {
 
 struct res_fs_rule {
        struct res_common       com;
+       int                     qpn;
 };
 
 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
@@ -355,7 +357,7 @@ static int mpt_mask(struct mlx4_dev *dev)
        return dev->caps.num_mpts - 1;
 }
 
-static void *find_res(struct mlx4_dev *dev, int res_id,
+static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -447,6 +449,7 @@ static struct res_common *alloc_qp_tr(int id)
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
+       atomic_set(&ret->ref_count, 0);
 
        return &ret->com;
 }
@@ -554,7 +557,7 @@ static struct res_common *alloc_xrcdn_tr(int id)
        return &ret->com;
 }
 
-static struct res_common *alloc_fs_rule_tr(u64 id)
+static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
 {
        struct res_fs_rule *ret;
 
@@ -564,7 +567,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id)
 
        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
-
+       ret->qpn = qpn;
        return &ret->com;
 }
 
@@ -602,7 +605,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
-               ret = alloc_fs_rule_tr(id);
+               ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
@@ -671,10 +674,14 @@ undo:
 
 static int remove_qp_ok(struct res_qp *res)
 {
-       if (res->com.state == RES_QP_BUSY)
+       if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
+           !list_empty(&res->mcg_list)) {
+               pr_err("resource tracker: failed to remove qp, state %d, ref_count %d\n",
+                      res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
-       else if (res->com.state != RES_QP_RESERVED)
+       } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
+       }
 
        return 0;
 }
@@ -3124,6 +3131,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
        int err;
        int qpn;
+       struct res_qp *rqp;
        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
        struct _rule_hw  *rule_header;
        int header_id;
@@ -3134,7 +3142,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 
        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
-       err = get_res(dev, slave, qpn, RES_QP, NULL);
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
        if (err) {
                pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
                return err;
@@ -3175,14 +3183,16 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        if (err)
                goto err_put;
 
-       err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
+       err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
        if (err) {
                mlx4_err(dev, "Fail to add flow steering resources.\n ");
                /* detach rule*/
                mlx4_cmd(dev, vhcr->out_param, 0, 0,
                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                         MLX4_CMD_NATIVE);
+               goto err_put;
        }
+       atomic_inc(&rqp->ref_count);
 err_put:
        put_res(dev, slave, qpn, RES_QP);
        return err;
@@ -3195,20 +3205,35 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_cmd_info *cmd)
 {
        int err;
+       struct res_qp *rqp;
+       struct res_fs_rule *rrule;
 
        if (dev->caps.steering_mode !=
            MLX4_STEERING_MODE_DEVICE_MANAGED)
                return -EOPNOTSUPP;
 
+       err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
+       if (err)
+               return err;
+       /* Release the rule from the busy state before removal */
+       put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
+       err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
+       if (err)
+               return err;
+
        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
        if (err) {
                mlx4_err(dev, "Fail to remove flow steering resources.\n ");
-               return err;
+               goto out;
        }
 
        err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);
+       if (!err)
+               atomic_dec(&rqp->ref_count);
+out:
+       put_res(dev, slave, rrule->qpn, RES_QP);
        return err;
 }
 
@@ -3806,6 +3831,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
        /*VLAN*/
        rem_slave_macs(dev, slave);
+       rem_slave_fs_rule(dev, slave);
        rem_slave_qps(dev, slave);
        rem_slave_srqs(dev, slave);
        rem_slave_cqs(dev, slave);
@@ -3814,6 +3840,5 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
        rem_slave_mtts(dev, slave);
        rem_slave_counters(dev, slave);
        rem_slave_xrcdns(dev, slave);
-       rem_slave_fs_rule(dev, slave);
        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 }
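The resource-tracker changes tie each flow-steering rule to its QP: the rule records the qpn, the QP gains an atomic ref_count that the attach/detach wrappers increment and decrement, and remove_qp_ok() refuses to free a QP while the count is non-zero. A small self-contained model of that guard; the names are stand-ins:

	#include <errno.h>
	#include <stdatomic.h>

	struct qp_res { atomic_int ref_count; int busy; };

	static void rule_attached(struct qp_res *qp) { atomic_fetch_add(&qp->ref_count, 1); }
	static void rule_detached(struct qp_res *qp) { atomic_fetch_sub(&qp->ref_count, 1); }

	/* Hedged model of remove_qp_ok(): reject removal while referenced. */
	static int remove_qp_ok(struct qp_res *qp)
	{
		if (qp->busy || atomic_load(&qp->ref_count))
			return -EBUSY;
		return 0;
	}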
index 33bcb63d56a2355c93dbd1306f24bce816eab23c..8fb481252e2cb653f45f7543f7e41c70cef9401e 100644 (file)
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
        for (; rxfc != 0; rxfc--) {
                rxh = ks8851_rdreg32(ks, KS_RXFHSR);
                rxstat = rxh & 0xffff;
-               rxlen = rxh >> 16;
+               rxlen = (rxh >> 16) & 0xfff;
 
                netif_dbg(ks, rx_status, ks->netdev,
                          "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
index c4122c86f829293cc60d6dd48c72602d7dded49f..efa29b712d5f1bdc9793648ebd76f9201cc3f99a 100644 (file)
@@ -1472,7 +1472,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        }
        platform_set_drvdata(pdev, ndev);
 
-       if (lpc_mii_init(pldat) != 0)
+       ret = lpc_mii_init(pldat);
+       if (ret)
                goto err_out_unregister_netdev;
 
        netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
index 39ab4d09faaa2eb500748466ba6ab0fbe4074ac9..73ce7dd6b9544d470a6e14b163f7ae751d3d749d 100644 (file)
@@ -1726,9 +1726,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 
                        skb->protocol = eth_type_trans(skb, netdev);
                        if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
-                               skb->ip_summed = CHECKSUM_NONE;
-                       else
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       else
+                               skb->ip_summed = CHECKSUM_NONE;
 
                        napi_gro_receive(&adapter->napi, skb);
                        (*work_done)++;
index 33e96176e4d82167a2d62a51f1f1eb5d711f7375..6ed333fe5c04cd36618c465c0a3bec8dafbe8fcc 100644 (file)
@@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                if (felic_stat & ECSR_LCHNG) {
                        /* Link Changed */
                        if (mdp->cd->no_psr || mdp->no_ether_link) {
-                               if (mdp->link == PHY_DOWN)
-                                       link_stat = 0;
-                               else
-                                       link_stat = PHY_ST_LINK;
+                               goto ignore_link;
                        } else {
                                link_stat = (sh_eth_read(ndev, PSR));
                                if (mdp->ether_link_active_low)
@@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                }
        }
 
+ignore_link:
        if (intr_status & EESR_TWB) {
                /* Write back end. Unused write back interrupt */
                if (intr_status & EESR_TABT)    /* Transmit Abort int */
@@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_cpu_data *cd = mdp->cd;
        irqreturn_t ret = IRQ_NONE;
-       u32 intr_status = 0;
+       unsigned long intr_status;
 
        spin_lock(&mdp->lock);
 
-       /* Get interrpt stat */
+       /* Get interrupt status */
        intr_status = sh_eth_read(ndev, EESR);
+       /* Mask it with the interrupt mask, forcing the ECI interrupt to always
+        * be enabled since it is the one that comes through regardless of the
+        * mask, and we need to fully handle it in sh_eth_error() in order to
+        * quench it, as it doesn't get cleared by just writing 1 to the ECI
+        * bit...
+        */
+       intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
        /* Clear interrupt */
        if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
                        EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
@@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
        struct phy_device *phydev = mdp->phydev;
        int new_state = 0;
 
-       if (phydev->link != PHY_DOWN) {
+       if (phydev->link) {
                if (phydev->duplex != mdp->duplex) {
                        new_state = 1;
                        mdp->duplex = phydev->duplex;
@@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                        if (mdp->cd->set_rate)
                                mdp->cd->set_rate(ndev);
                }
-               if (mdp->link == PHY_DOWN) {
+               if (!mdp->link) {
                        sh_eth_write(ndev,
                                (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
                        new_state = 1;
                        mdp->link = phydev->link;
+                       if (mdp->cd->no_psr || mdp->no_ether_link)
+                               sh_eth_rcv_snd_enable(ndev);
                }
        } else if (mdp->link) {
                new_state = 1;
-               mdp->link = PHY_DOWN;
+               mdp->link = 0;
                mdp->speed = 0;
                mdp->duplex = -1;
+               if (mdp->cd->no_psr || mdp->no_ether_link)
+                       sh_eth_rcv_snd_disable(ndev);
        }
 
        if (new_state && netif_msg_link(mdp))
@@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                mdp->mii_bus->id , mdp->phy_id);
 
-       mdp->link = PHY_DOWN;
+       mdp->link = 0;
        mdp->speed = 0;
        mdp->duplex = -1;
 
@@ -2220,6 +2228,7 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
 /* MDIO bus release function */
 static int sh_mdio_release(struct net_device *ndev)
 {
+       struct sh_eth_private *mdp = netdev_priv(ndev);
        struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
 
        /* unregister mdio bus */
@@ -2234,6 +2243,9 @@ static int sh_mdio_release(struct net_device *ndev)
        /* free bitbang info */
        free_mdio_bitbang(bus);
 
+       /* free bitbang memory */
+       kfree(mdp->bitbang);
+
        return 0;
 }
 
@@ -2262,6 +2274,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
        bitbang->ctrl.ops = &bb_ops;
 
        /* MII controller setting */
+       mdp->bitbang = bitbang;
        mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
        if (!mdp->mii_bus) {
                ret = -ENOMEM;
@@ -2441,6 +2454,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
                }
                mdp->tsu_addr = ioremap(rtsu->start,
                                        resource_size(rtsu));
+               if (mdp->tsu_addr == NULL) {
+                       ret = -ENOMEM;
+                       dev_err(&pdev->dev, "TSU ioremap failed.\n");
+                       goto out_release;
+               }
                mdp->port = devno % 2;
                ndev->features = NETIF_F_HW_VLAN_FILTER;
        }
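The comment added to sh_eth_interrupt() describes a simple masking rule: process only the status bits that are enabled in EESIPR, but never mask out ECI. A one-function hedged sketch of that rule; the parameter names are stand-ins for EESR, EESIPR and the ECI bit:

	/* Hedged sketch: handle only enabled sources, never drop the ECI bit. */
	static unsigned long masked_status(unsigned long raw_status,
					   unsigned long enabled_mask,
					   unsigned long eci_bit)
	{
		return raw_status & (enabled_mask | eci_bit);
	}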
index bae84fd2e73a7419504d8b29b195c0a118711b8a..828be4515008145507f59a5bbe34ac64eb28090b 100644 (file)
@@ -705,6 +705,7 @@ struct sh_eth_private {
        const u16 *reg_offset;
        void __iomem *addr;
        void __iomem *tsu_addr;
+       struct bb_info *bitbang;
        u32 num_rx_ring;
        u32 num_tx_ring;
        dma_addr_t rx_desc_dma;
@@ -722,7 +723,7 @@ struct sh_eth_private {
        u32 phy_id;                                     /* PHY ID */
        struct mii_bus *mii_bus;        /* MDIO bus control */
        struct phy_device *phydev;      /* PHY device control */
-       enum phy_state link;
+       int link;
        phy_interface_t phy_interface;
        int msg_enable;
        int speed;
index 75c48558e6fd9b8afe355cbe41d95ab9dd842206..80cad06e5eb21337b111bc32af0376e6da2d7065 100644 (file)
@@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status)
         * queue is stopped then start the queue as we have free desc for tx
         */
        if (unlikely(netif_queue_stopped(ndev)))
-               netif_start_queue(ndev);
+               netif_wake_queue(ndev);
        cpts_tx_timestamp(priv->cpts, skb);
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += len;
@@ -1364,7 +1364,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                struct platform_device *mdio;
 
                parp = of_get_property(slave_node, "phy_id", &lenp);
-               if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
+               if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
                        pr_err("Missing slave[%d] phy_id property\n", i);
                        ret = -EINVAL;
                        goto error_ret;
index ae1b77aa199f87ab947c185b552d9274266dac91..72300bc9e3783842ef3608be24f0f33d2ed0f69b 100644 (file)
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status)
         * queue is stopped then start the queue as we have free desc for tx
         */
        if (unlikely(netif_queue_stopped(ndev)))
-               netif_start_queue(ndev);
+               netif_wake_queue(ndev);
        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += len;
        dev_kfree_skb_any(skb);
index 9abe51710f229377cb9f405ffedb198eab580738..1a15ec14c386f3c4e05808722bd84a196d82f3a8 100644 (file)
@@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
 static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct usbnet *dev = netdev_priv(netdev);
+       int ret;
+
+       if (new_mtu > MAX_SINGLE_PACKET_SIZE)
+               return -EINVAL;
 
-       int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
+       ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
        if (ret < 0) {
                netdev_warn(dev->net, "Failed to set mac rx frame length\n");
                return ret;
@@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev)
 
        netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
 
-       ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
+       ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
        if (ret < 0) {
                netdev_warn(dev->net, "Failed to set max rx frame length\n");
                return ret;
@@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                        else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
                                dev->net->stats.rx_frame_errors++;
                } else {
-                       /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
-                       if (unlikely(size > (ETH_FRAME_LEN + 12))) {
+                       /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
+                       if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
                                netif_dbg(dev, rx_err, dev->net,
                                          "size err rx_cmd_a=0x%08x\n",
                                          rx_cmd_a);
index 4cc13940c8950d2a7ad2a3fdf1a092ac3736a7d5..f76c3ca07a4501603883bbcbe9feb901d4eca390 100644 (file)
@@ -1023,6 +1023,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
                                          AR_PHY_AGC_CONTROL_FLTR_CAL   |
                                          AR_PHY_AGC_CONTROL_PKDET_CAL;
 
+       /* Use chip chainmask only for calibration */
        ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
 
        if (rtt) {
@@ -1150,6 +1151,9 @@ skip_tx_iqcal:
                ar9003_hw_rtt_disable(ah);
        }
 
+       /* Revert chainmask to runtime parameters */
+       ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
        /* Initialize list pointers */
        ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 
index ade3afb21f911e86589e325c2e99b49b7fe91289..7fdac6c7b3ea5dc030136d66c4574c4d8373a82e 100644 (file)
@@ -28,21 +28,21 @@ void ath_tx_complete_poll_work(struct work_struct *work)
        int i;
        bool needreset = false;
 
-       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
-               if (ATH_TXQ_SETUP(sc, i)) {
-                       txq = &sc->tx.txq[i];
-                       ath_txq_lock(sc, txq);
-                       if (txq->axq_depth) {
-                               if (txq->axq_tx_inprogress) {
-                                       needreset = true;
-                                       ath_txq_unlock(sc, txq);
-                                       break;
-                               } else {
-                                       txq->axq_tx_inprogress = true;
-                               }
+       for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+               txq = sc->tx.txq_map[i];
+
+               ath_txq_lock(sc, txq);
+               if (txq->axq_depth) {
+                       if (txq->axq_tx_inprogress) {
+                               needreset = true;
+                               ath_txq_unlock(sc, txq);
+                               break;
+                       } else {
+                               txq->axq_tx_inprogress = true;
                        }
-                       ath_txq_unlock_complete(sc, txq);
                }
+               ath_txq_unlock_complete(sc, txq);
+       }
 
        if (needreset) {
                ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
@@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data)
 {
        struct ath_softc *sc = (struct ath_softc *)data;
 
-       ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+       if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
+               ieee80211_queue_work(sc->hw, &sc->hw_check_work);
 }
 
 /*
index 38bc5a7997ffd43b9a8c8f45a6329fe4ffdac5c1..122146943bf204693b540b124f6c947bec34833b 100644 (file)
@@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
        const struct b43_dma_ops *ops;
        struct b43_dmaring *ring;
        struct b43_dmadesc_meta *meta;
+       static const struct b43_txstatus fake; /* filled with 0 */
+       const struct b43_txstatus *txstat;
        int slot, firstused;
        bool frame_succeed;
+       int skip;
+       static u8 err_out1, err_out2;
 
        ring = parse_cookie(dev, status->cookie, &slot);
        if (unlikely(!ring))
@@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
        firstused = ring->current_slot - ring->used_slots + 1;
        if (firstused < 0)
                firstused = ring->nr_slots + firstused;
+
+       skip = 0;
        if (unlikely(slot != firstused)) {
                /* This possibly is a firmware bug and will result in
-                * malfunction, memory leaks and/or stall of DMA functionality. */
-               b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
-                      "Expected %d, but got %d\n",
-                      ring->index, firstused, slot);
-               return;
+                * malfunction, memory leaks and/or stall of DMA functionality.
+                */
+               if (slot == next_slot(ring, next_slot(ring, firstused))) {
+                       /* If a single header/data pair was missed, skip over
+                        * the first two slots in an attempt to recover.
+                        */
+                       slot = firstused;
+                       skip = 2;
+                       if (!err_out1) {
+                               /* Report the error once. */
+                               b43dbg(dev->wl,
+                                      "Skip on DMA ring %d slot %d.\n",
+                                      ring->index, slot);
+                               err_out1 = 1;
+                       }
+               } else {
+                       /* More than a single header/data pair were missed.
+                        * Report this error once.
+                        */
+                       if (!err_out2)
+                               b43dbg(dev->wl,
+                                      "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
+                                      ring->index, firstused, slot);
+                       err_out2 = 1;
+                       return;
+               }
        }
 
        ops = ring->ops;
@@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                               slot, firstused, ring->index);
                        break;
                }
+
                if (meta->skb) {
                        struct b43_private_tx_info *priv_info =
-                               b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+                            b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
 
-                       unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
+                       unmap_descbuffer(ring, meta->dmaaddr,
+                                        meta->skb->len, 1);
                        kfree(priv_info->bouncebuffer);
                        priv_info->bouncebuffer = NULL;
                } else {
@@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                        struct ieee80211_tx_info *info;
 
                        if (unlikely(!meta->skb)) {
-                               /* This is a scatter-gather fragment of a frame, so
-                                * the skb pointer must not be NULL. */
+                               /* This is a scatter-gather fragment of a frame,
+                                * so the skb pointer must not be NULL.
+                                */
                                b43dbg(dev->wl, "TX status unexpected NULL skb "
                                       "at slot %d (first=%d) on ring %d\n",
                                       slot, firstused, ring->index);
@@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 
                        /*
                         * Call back to inform the ieee80211 subsystem about
-                        * the status of the transmission.
+                        * the status of the transmission. When skipping over
+                        * a missed TX status report, use a status structure
+                        * filled with zeros to indicate that the frame was not
+                        * sent (frame_count 0) and not acknowledged
                         */
-                       frame_succeed = b43_fill_txstatus_report(dev, info, status);
+                       if (unlikely(skip))
+                               txstat = &fake;
+                       else
+                               txstat = status;
+
+                       frame_succeed = b43_fill_txstatus_report(dev, info,
+                                                                txstat);
 #ifdef CONFIG_B43_DEBUG
                        if (frame_succeed)
                                ring->nr_succeed_tx_packets++;
@@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                /* Everything unmapped and free'd. So it's not used anymore. */
                ring->used_slots--;
 
-               if (meta->is_last_fragment) {
+               if (meta->is_last_fragment && !skip) {
                        /* This is the last scatter-gather
                         * fragment of the frame. We are done. */
                        break;
                }
                slot = next_slot(ring, slot);
+               if (skip > 0)
+                       --skip;
        }
        if (ring->stopped) {
                B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
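The skip-two-slots recovery above relies on the TX descriptors forming a ring, so a single missed header/data pair simply means advancing two slots. A hedged sketch of the slot arithmetic it assumes; the real next_slot() helper lives elsewhere in dma.c, so this is an illustration, not the driver's code:

	/* Hedged sketch: advance one position around a ring of nr_slots. */
	static inline int ring_next_slot(int slot, int nr_slots)
	{
		return (slot + 1 == nr_slots) ? 0 : slot + 1;
	}

	/* A single missed pair shows up as
	 *   reported == ring_next_slot(ring_next_slot(firstused, n), n)
	 * and recovery processes from firstused while decrementing skip = 2. */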
index 3c35382ee6c23ebfbaed8d1dcfdf33e804faec96..e8486c1e091af2f2db2a23827dc612315398751b 100644 (file)
@@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
        u16 clip_off[2] = { 0xFFFF, 0xFFFF };
 
        u8 vcm_final = 0;
-       s8 offset[4];
+       s32 offset[4];
        s32 results[8][4] = { };
        s32 results_min[4] = { };
        s32 poll_results[4] = { };
@@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
                }
                for (i = 0; i < 4; i += 2) {
                        s32 curr;
-                       s32 mind = 40;
+                       s32 mind = 0x100000;
                        s32 minpoll = 249;
                        u8 minvcm = 0;
                        if (2 * core != i)
@@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
        u8 regs_save_radio[2];
        u16 regs_save_phy[2];
 
-       s8 offset[4];
+       s32 offset[4];
        u8 core;
        u8 rail;
 
@@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
        }
 
        for (i = 0; i < 4; i++) {
-               s32 mind = 40;
+               s32 mind = 0x100000;
                u8 minvcm = 0;
                s32 minpoll = 249;
                s32 curr;
index 21a824232478f2ac02d593495ac5fe82ff94df77..18d37645e2cde25f2d291a94bd51a3e3675db5be 100644 (file)
@@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
        gain0_15 = ((biq1 & 0xf) << 12) |
                   ((tia & 0xf) << 8) |
                   ((lna2 & 0x3) << 6) |
-                  ((lna2 & 0x3) << 4) |
-                  ((lna1 & 0x3) << 2) |
-                  ((lna1 & 0x3) << 0);
+                  ((lna2 &
+                    0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
 
        mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
        mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
        }
 
        mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
-       mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
-       mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
 
 }
 
@@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
        return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
 }
 
-static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
-                                     u16 tia_gain, u16 lna2_gain)
-{
-       u32 i_thresh_l, q_thresh_l;
-       u32 i_thresh_h, q_thresh_h;
-       struct lcnphy_iq_est iq_est_h, iq_est_l;
-
-       wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
-                                              lna2_gain, 0);
-
-       wlc_lcnphy_rx_gain_override_enable(pi, true);
-       wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
-       udelay(500);
-       write_radio_reg(pi, RADIO_2064_REG112, 0);
-       if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
-               return false;
-
-       wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
-       udelay(500);
-       write_radio_reg(pi, RADIO_2064_REG112, 0);
-       if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
-               return false;
-
-       i_thresh_l = (iq_est_l.i_pwr << 1);
-       i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
-
-       q_thresh_l = (iq_est_l.q_pwr << 1);
-       q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
-       if ((iq_est_h.i_pwr > i_thresh_l) &&
-           (iq_est_h.i_pwr < i_thresh_h) &&
-           (iq_est_h.q_pwr > q_thresh_l) &&
-           (iq_est_h.q_pwr < q_thresh_h))
-               return true;
-
-       return false;
-}
-
 static bool
 wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
                     const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
            RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
            rfoverride3_old, rfoverride3val_old, rfoverride4_old,
            rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
-       int tia_gain, lna2_gain, biq1_gain;
-       bool set_gain;
+       int tia_gain;
+       u32 received_power, rx_pwr_threshold;
        u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
        u16 values_to_save[11];
        s16 *ptr;
@@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
                goto cal_done;
        }
 
-       WARN_ON(module != 1);
-       tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
-       wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
-
-       for (i = 0; i < 11; i++)
-               values_to_save[i] =
-                       read_radio_reg(pi, rxiq_cal_rf_reg[i]);
-       Core1TxControl_old = read_phy_reg(pi, 0x631);
-
-       or_phy_reg(pi, 0x631, 0x0015);
-
-       RFOverride0_old = read_phy_reg(pi, 0x44c);
-       RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
-       rfoverride2_old = read_phy_reg(pi, 0x4b0);
-       rfoverride2val_old = read_phy_reg(pi, 0x4b1);
-       rfoverride3_old = read_phy_reg(pi, 0x4f9);
-       rfoverride3val_old = read_phy_reg(pi, 0x4fa);
-       rfoverride4_old = read_phy_reg(pi, 0x938);
-       rfoverride4val_old = read_phy_reg(pi, 0x939);
-       afectrlovr_old = read_phy_reg(pi, 0x43b);
-       afectrlovrval_old = read_phy_reg(pi, 0x43c);
-       old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
-       old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
-
-       tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
-       if (tx_gain_override_old) {
-               wlc_lcnphy_get_tx_gain(pi, &old_gains);
-               tx_gain_index_old = pi_lcn->lcnphy_current_index;
-       }
-
-       wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
+       if (module == 1) {
 
-       mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
-       mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
+               tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
+               wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
 
-       mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
-       mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+               for (i = 0; i < 11; i++)
+                       values_to_save[i] =
+                               read_radio_reg(pi, rxiq_cal_rf_reg[i]);
+               Core1TxControl_old = read_phy_reg(pi, 0x631);
+
+               or_phy_reg(pi, 0x631, 0x0015);
+
+               RFOverride0_old = read_phy_reg(pi, 0x44c);
+               RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
+               rfoverride2_old = read_phy_reg(pi, 0x4b0);
+               rfoverride2val_old = read_phy_reg(pi, 0x4b1);
+               rfoverride3_old = read_phy_reg(pi, 0x4f9);
+               rfoverride3val_old = read_phy_reg(pi, 0x4fa);
+               rfoverride4_old = read_phy_reg(pi, 0x938);
+               rfoverride4val_old = read_phy_reg(pi, 0x939);
+               afectrlovr_old = read_phy_reg(pi, 0x43b);
+               afectrlovrval_old = read_phy_reg(pi, 0x43c);
+               old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+               old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
+
+               tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
+               if (tx_gain_override_old) {
+                       wlc_lcnphy_get_tx_gain(pi, &old_gains);
+                       tx_gain_index_old = pi_lcn->lcnphy_current_index;
+               }
 
-       write_radio_reg(pi, RADIO_2064_REG116, 0x06);
-       write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
-       write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
-       write_radio_reg(pi, RADIO_2064_REG098, 0x03);
-       write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
-       mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
-       write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
-       write_radio_reg(pi, RADIO_2064_REG114, 0x01);
-       write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
-       write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
-
-       mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
-       mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
-       mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
-       mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
-       mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
-       mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
-       mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
-       mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
-       mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
-       mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
+               wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
 
-       mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
-       mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+               mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
+               mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
 
-       write_phy_reg(pi, 0x6da, 0xffff);
-       or_phy_reg(pi, 0x6db, 0x3);
+               mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+               mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
 
-       wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
-       set_gain = false;
-
-       lna2_gain = 3;
-       while ((lna2_gain >= 0) && !set_gain) {
-               tia_gain = 4;
-
-               while ((tia_gain >= 0) && !set_gain) {
-                       biq1_gain = 6;
-
-                       while ((biq1_gain >= 0) && !set_gain) {
-                               set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
-                                                                    (u16)
-                                                                    biq1_gain,
-                                                                    (u16)
-                                                                    tia_gain,
-                                                                    (u16)
-                                                                    lna2_gain);
-                               biq1_gain -= 1;
-                       }
+               write_radio_reg(pi, RADIO_2064_REG116, 0x06);
+               write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
+               write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
+               write_radio_reg(pi, RADIO_2064_REG098, 0x03);
+               write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+               mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
+               write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
+               write_radio_reg(pi, RADIO_2064_REG114, 0x01);
+               write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
+               write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
+
+               mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
+               mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
+               mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
+               mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
+               mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
+               mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
+               mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
+               mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
+               mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
+               mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
+
+               mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+               mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+
+               wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
+               write_phy_reg(pi, 0x6da, 0xffff);
+               or_phy_reg(pi, 0x6db, 0x3);
+               wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
+               wlc_lcnphy_rx_gain_override_enable(pi, true);
+
+               tia_gain = 8;
+               rx_pwr_threshold = 950;
+               while (tia_gain > 0) {
                        tia_gain -= 1;
+                       wlc_lcnphy_set_rx_gain_by_distribution(pi,
+                                                              0, 0, 2, 2,
+                                                              (u16)
+                                                              tia_gain, 1, 0);
+                       udelay(500);
+
+                       received_power =
+                               wlc_lcnphy_measure_digital_power(pi, 2000);
+                       if (received_power < rx_pwr_threshold)
+                               break;
                }
-               lna2_gain -= 1;
-       }
+               result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
 
-       if (set_gain)
-               result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
-       else
-               result = false;
+               wlc_lcnphy_stop_tx_tone(pi);
 
-       wlc_lcnphy_stop_tx_tone(pi);
+               write_phy_reg(pi, 0x631, Core1TxControl_old);
 
-       write_phy_reg(pi, 0x631, Core1TxControl_old);
-
-       write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
-       write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
-       write_phy_reg(pi, 0x4b0, rfoverride2_old);
-       write_phy_reg(pi, 0x4b1, rfoverride2val_old);
-       write_phy_reg(pi, 0x4f9, rfoverride3_old);
-       write_phy_reg(pi, 0x4fa, rfoverride3val_old);
-       write_phy_reg(pi, 0x938, rfoverride4_old);
-       write_phy_reg(pi, 0x939, rfoverride4val_old);
-       write_phy_reg(pi, 0x43b, afectrlovr_old);
-       write_phy_reg(pi, 0x43c, afectrlovrval_old);
-       write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
-       write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
+               write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
+               write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
+               write_phy_reg(pi, 0x4b0, rfoverride2_old);
+               write_phy_reg(pi, 0x4b1, rfoverride2val_old);
+               write_phy_reg(pi, 0x4f9, rfoverride3_old);
+               write_phy_reg(pi, 0x4fa, rfoverride3val_old);
+               write_phy_reg(pi, 0x938, rfoverride4_old);
+               write_phy_reg(pi, 0x939, rfoverride4val_old);
+               write_phy_reg(pi, 0x43b, afectrlovr_old);
+               write_phy_reg(pi, 0x43c, afectrlovrval_old);
+               write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+               write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
 
-       wlc_lcnphy_clear_trsw_override(pi);
+               wlc_lcnphy_clear_trsw_override(pi);
 
-       mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
+               mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
 
-       for (i = 0; i < 11; i++)
-               write_radio_reg(pi, rxiq_cal_rf_reg[i],
-                               values_to_save[i]);
+               for (i = 0; i < 11; i++)
+                       write_radio_reg(pi, rxiq_cal_rf_reg[i],
+                                       values_to_save[i]);
 
-       if (tx_gain_override_old)
-               wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
-       else
-               wlc_lcnphy_disable_tx_gain_override(pi);
+               if (tx_gain_override_old)
+                       wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
+               else
+                       wlc_lcnphy_disable_tx_gain_override(pi);
 
-       wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
-       wlc_lcnphy_rx_gain_override_enable(pi, false);
+               wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
+               wlc_lcnphy_rx_gain_override_enable(pi, false);
+       }
 
 cal_done:
        kfree(ptr);
@@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
                write_radio_reg(pi, RADIO_2064_REG038, 3);
                write_radio_reg(pi, RADIO_2064_REG091, 7);
        }
-
-       if (!(pi->sh->boardflags & BFL_FEM)) {
-               u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc,
-                       0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0};
-
-               write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
-               write_radio_reg(pi, RADIO_2064_REG091, 0x3);
-               write_radio_reg(pi, RADIO_2064_REG038, 0x3);
-
-               write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
-       }
 }
 
 static int
@@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
                } else {
                        mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
                        mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
-                       mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
-                       mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
-                       mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
-                       mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
-                       mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
-                       mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
-                       mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
-                       mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
-                       mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
-                       mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
                }
        } else {
                mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
                    (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
 
        mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
-       mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
 }
 
 static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 {
        struct phytbl_info tab;
        u32 rfseq, ind;
-       u8 tssi_sel;
 
        tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
        tab.tbl_width = 32;
@@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 
        mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
 
-       if (pi->sh->boardflags & BFL_FEM) {
-               tssi_sel = 0x1;
-               wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
-       } else {
-               tssi_sel = 0xe;
-               wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA);
-       }
+       wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
        mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
 
        mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
        mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
 
        if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
-               mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
+               mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
                mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
        } else {
-               mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
                mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
                mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
        }
@@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 
        mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
 
-       mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
-       mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
-       mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
-
        wlc_lcnphy_pwrctrl_rssiparams(pi);
 }
 
@@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
                read_radio_reg(pi, RADIO_2064_REG007) & 1;
        u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
        u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
-       u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
-
        idleTssi = read_phy_reg(pi, 0x4ab);
        suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
                         MCTL_EN_MAC));
@@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
        mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
        mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
        wlc_lcnphy_tssi_setup(pi);
-
-       mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
-       mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
-
-       wlc_lcnphy_set_bbmult(pi, 0x0);
-
        wlc_phy_do_dummy_tx(pi, true, OFF);
        idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
                    >> 0);
@@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
 
        mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
 
-       wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
        wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
        wlc_lcnphy_set_tx_gain(pi, &old_gains);
        wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
                        wlc_lcnphy_write_table(pi, &tab);
                        tab.tbl_offset++;
                }
-               mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
-               mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
-               mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
-               mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
-               mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
 
                mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
 
@@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
        target_gains.pad_gain = 21;
        target_gains.dac_gain = 0;
        wlc_lcnphy_set_tx_gain(pi, &target_gains);
+       wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
 
        if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
 
@@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
                                        lcnphy_recal ? LCNPHY_CAL_RECAL :
                                        LCNPHY_CAL_FULL), false);
        } else {
-               wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
                wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
        }
 
@@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
        if (CHSPEC_IS5G(pi->radio_chanspec))
                pa_gain = 0x70;
        else
-               pa_gain = 0x60;
+               pa_gain = 0x70;
 
        if (pi->sh->boardflags & BFL_FEM)
                pa_gain = 0x10;
-
        tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
        tab.tbl_width = 32;
        tab.tbl_len = 1;
        tab.tbl_ptr = &val;
 
        for (j = 0; j < 128; j++) {
-               if (pi->sh->boardflags & BFL_FEM)
-                       gm_gain = gain_table[j].gm;
-               else
-                       gm_gain = 15;
-
+               gm_gain = gain_table[j].gm;
                val = (((u32) pa_gain << 24) |
                       (gain_table[j].pad << 16) |
                       (gain_table[j].pga << 8) | gm_gain);
@@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
 
        write_phy_reg(pi, 0x4ea, 0x4688);
 
-       if (pi->sh->boardflags & BFL_FEM)
-               mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
-       else
-               mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
+       mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
 
        mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
 
@@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
        wlc_lcnphy_rcal(pi);
 
        wlc_lcnphy_rc_cal(pi);
-
-       if (!(pi->sh->boardflags & BFL_FEM)) {
-               write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
-               write_radio_reg(pi, RADIO_2064_REG033, 0x19);
-               write_radio_reg(pi, RADIO_2064_REG039, 0xe);
-       }
-
 }
 
 static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
                wlc_lcnphy_write_table(pi, &tab);
        }
 
-       if (!(pi->sh->boardflags & BFL_FEM)) {
-               tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
-               tab.tbl_width = 16;
-               tab.tbl_ptr = &val;
-               tab.tbl_len = 1;
+       tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
+       tab.tbl_width = 16;
+       tab.tbl_ptr = &val;
+       tab.tbl_len = 1;
 
-               val = 150;
-               tab.tbl_offset = 0;
-               wlc_lcnphy_write_table(pi, &tab);
+       val = 114;
+       tab.tbl_offset = 0;
+       wlc_lcnphy_write_table(pi, &tab);
 
-               val = 220;
-               tab.tbl_offset = 1;
-               wlc_lcnphy_write_table(pi, &tab);
-       }
+       val = 130;
+       tab.tbl_offset = 1;
+       wlc_lcnphy_write_table(pi, &tab);
+
+       val = 6;
+       tab.tbl_offset = 8;
+       wlc_lcnphy_write_table(pi, &tab);
 
        if (CHSPEC_IS2G(pi->radio_chanspec)) {
                if (pi->sh->boardflags & BFL_FEM)
@@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
                wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
 
        mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
-       wlc_lcnphy_tssi_setup(pi);
 }
 
 void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
        if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
                return false;
 
-       if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+       if ((pi->sh->boardflags & BFL_FEM) &&
+           (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
                if (pi_lcn->lcnphy_tempsense_option == 3) {
                        pi->hwpwrctrl = true;
                        pi->hwpwrctrl_capable = true;
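
The LCN PHY rework above is built almost entirely from mod_phy_reg()/mod_radio_reg() calls, which take a bit mask and a value and touch only the masked bits of the register. A minimal standalone sketch of that read-modify-write convention (mod_bits() below is illustrative, not driver code):

#include <stdint.h>

/* Only the bits selected by 'mask' are replaced by 'val'; every other bit of
 * the current register value is preserved. */
static uint16_t mod_bits(uint16_t cur, uint16_t mask, uint16_t val)
{
	return (cur & ~mask) | (val & mask);
}

/* So mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5) sets bit 5 of PHY register
 * 0x938 and leaves the rest alone, while mod_phy_reg(pi, 0x939, (0x1 << 5),
 * 0 << 5) clears the same bit in register 0x939. */
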
index b7e95acc2084033dd5c27de24b80bebd78f8dfbe..622c01ca72c5d24e8bfc55c3b84ad61633c4bb92 100644 (file)
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
 };
 
 static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
 };
 
 static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
index 3630a41df50d7c64815abae048696ea04329a703..c353b5f19c8c639480be47121f06c166dfc9f71c 100644 (file)
@@ -475,6 +475,7 @@ il3945_tx_skb(struct il_priv *il,
        dma_addr_t txcmd_phys;
        int txq_id = skb_get_queue_mapping(skb);
        u16 len, idx, hdr_len;
+       u16 firstlen, secondlen;
        u8 id;
        u8 unicast;
        u8 sta_id;
@@ -589,21 +590,22 @@ il3945_tx_skb(struct il_priv *il,
        len =
            sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
            hdr_len;
-       len = (len + 3) & ~3;
+       firstlen = (len + 3) & ~3;
 
        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys =
-           pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
+           pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+                          PCI_DMA_TODEVICE);
        if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
                goto drop_unlock;
 
        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
-       len = skb->len - hdr_len;
-       if (len) {
+       secondlen = skb->len - hdr_len;
+       if (secondlen > 0) {
                phys_addr =
-                   pci_map_single(il->pci_dev, skb->data + hdr_len, len,
+                   pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
                                   PCI_DMA_TODEVICE);
                if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
                        goto drop_unlock;
@@ -611,12 +613,12 @@ il3945_tx_skb(struct il_priv *il,
 
        /* Add buffer containing Tx command and MAC(!) header to TFD's
         * first entry */
-       il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+       il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
        dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-       dma_unmap_len_set(out_meta, len, len);
-       if (len)
-               il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0,
-                                              U32_PAD(len));
+       dma_unmap_len_set(out_meta, len, firstlen);
+       if (secondlen > 0)
+               il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0,
+                                              U32_PAD(secondlen));
 
        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                txq->need_update = 1;
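
The il3945_tx_skb() change keeps the aligned command-header length in firstlen and the payload length in secondlen, so the TFD attach calls further down no longer reuse a len variable that was overwritten in between. The "(len + 3) & ~3" expression is the usual round-up-to-a-multiple-of-4 idiom; a standalone illustration:

#include <stddef.h>

/* Round len up to the next multiple of 4: the +3 carries when the low two
 * bits are non-zero, and ~3 then clears those two bits. */
static size_t round_up_4(size_t len)
{
	return (len + 3) & ~(size_t)3;
}

/* round_up_4(41) == 44, round_up_4(44) == 44, round_up_4(0) == 0 */
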
index e8324b5e5bfe836610f5680f86cf9c742fc4061b..6c7493c2d698e6a2c83de679131890726aff6456 100644 (file)
@@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
        int rate_idx;
        int i;
        u32 rate;
-       u8 use_green = il4965_rs_use_green(il, sta);
+       u8 use_green;
        u8 active_tbl = 0;
        u8 valid_tx_ant;
        struct il_station_priv *sta_priv;
@@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
        if (!sta || !lq_sta)
                return;
 
+       use_green = il4965_rs_use_green(il, sta);
        sta_priv = (void *)sta->drv_priv;
 
        i = lq_sta->last_txrate_idx;
index 86ea5f4c39398077efa5e13c4a16a7dcf769bf05..44ca0e57f9f76fd8b3b58c183a630fdb057f78ad 100644 (file)
@@ -1261,6 +1261,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                return -EIO;
        }
 
+       /*
+        * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
+        * in iwl_down but cancel the workers only later.
+        */
+       if (!priv->ucode_loaded) {
+               IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
+               return -EIO;
+       }
+
        /*
         * Synchronous commands from this op-mode must hold
         * the mutex, this ensures we don't try to send two
index 736fe9bb140ebab643e065742469ef0a9f667e31..1a4ac9236a4446e2d8b30722cad23e826756a660 100644 (file)
@@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
                return -EIO;
        }
 
+       priv->ucode_loaded = true;
+
        if (ucode_type != IWL_UCODE_WOWLAN) {
                /* delay a bit to give rfkill time to run */
                msleep(5);
@@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
                return ret;
        }
 
-       priv->ucode_loaded = true;
-
        return 0;
 }
 
index 17bedc50e753d62613055eb3662c4095fe9f41bf..12c4f31ca8fbddcc95925cebf4b269b150a64ef2 100644 (file)
@@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 
        /* If platform's RF_KILL switch is NOT set to KILL */
        hw_rfkill = iwl_is_rfkill_set(trans);
+       if (hw_rfkill)
+               set_bit(STATUS_RFKILL, &trans_pcie->status);
+       else
+               clear_bit(STATUS_RFKILL, &trans_pcie->status);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        if (hw_rfkill && !run_in_rfkill)
                return -ERFKILL;
@@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
        int err;
 
@@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
        iwl_enable_rfkill_int(trans);
 
        hw_rfkill = iwl_is_rfkill_set(trans);
+       if (hw_rfkill)
+               set_bit(STATUS_RFKILL, &trans_pcie->status);
+       else
+               clear_bit(STATUS_RFKILL, &trans_pcie->status);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
        return 0;
@@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
                 * op_mode.
                 */
                hw_rfkill = iwl_is_rfkill_set(trans);
+               if (hw_rfkill)
+                       set_bit(STATUS_RFKILL, &trans_pcie->status);
+               else
+                       clear_bit(STATUS_RFKILL, &trans_pcie->status);
                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        }
 }
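
The same three steps (mirror the switch state into trans_pcie->status, then report it to the op-mode) now appear in start_fw(), start_hw() and stop_hw(). A hypothetical helper, shown only to make the repeated pattern explicit and built from calls already visible in these hunks, could fold it into one place:

/* Hypothetical helper, not part of this patch: keep STATUS_RFKILL in sync
 * with the hardware RF-kill switch and notify the op-mode. */
static bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);

	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans_pcie->status);
	else
		clear_bit(STATUS_RFKILL, &trans_pcie->status);

	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	return hw_rfkill;
}
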
index 8595c16f74deb8bdcf531725e3c65c0986d58745..cb5c6792e3a8de2679022370c140263b8e1e9e57 100644 (file)
@@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                int copy = 0;
 
-               if (!cmd->len)
+               if (!cmd->len[i])
                        continue;
 
                /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
index 20a6c55558737b3bc3e3968856c5fe827780c688..b5c8b962ce12f74077fb6159b3e8558f521f10a5 100644 (file)
@@ -157,6 +157,20 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
                return -1;
        }
 
+       cmd_code = le16_to_cpu(host_cmd->command);
+       cmd_size = le16_to_cpu(host_cmd->size);
+
+       if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
+           cmd_code != HostCmd_CMD_FUNC_SHUTDOWN &&
+           cmd_code != HostCmd_CMD_FUNC_INIT) {
+               dev_err(adapter->dev,
+                       "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
+                       cmd_code);
+               mwifiex_complete_cmd(adapter, cmd_node);
+               mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               return -1;
+       }
+
        /* Set command sequence number */
        adapter->seq_num++;
        host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
@@ -168,9 +182,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
        adapter->curr_cmd = cmd_node;
        spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
-       cmd_code = le16_to_cpu(host_cmd->command);
-       cmd_size = le16_to_cpu(host_cmd->size);
-
        /* Adjust skb length */
        if (cmd_node->cmd_skb->len > cmd_size)
                /*
@@ -484,8 +495,6 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
 
        ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
                                     data_buf);
-       if (!ret)
-               ret = mwifiex_wait_queue_complete(adapter);
 
        return ret;
 }
@@ -588,9 +597,10 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
        if (cmd_no == HostCmd_CMD_802_11_SCAN) {
                mwifiex_queue_scan_cmd(priv, cmd_node);
        } else {
-               adapter->cmd_queued = cmd_node;
                mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
                queue_work(adapter->workqueue, &adapter->main_work);
+               if (cmd_node->wait_q_enabled)
+                       ret = mwifiex_wait_queue_complete(adapter, cmd_node);
        }
 
        return ret;
index e38aa9b3663d0a76c0cfe1c7f0698f923fc58587..0ff4c37ab42ae6853dbe6c85f8bb33f476b531e1 100644 (file)
@@ -709,6 +709,14 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
                return ret;
        }
 
+       /* cancel current command */
+       if (adapter->curr_cmd) {
+               dev_warn(adapter->dev, "curr_cmd is still in processing\n");
+               del_timer(&adapter->cmd_timer);
+               mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
+               adapter->curr_cmd = NULL;
+       }
+
        /* shut down mwifiex */
        dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
 
index 553adfb0aa81a35c468e44a07943b3f8fdf6e79e..7035ade9af74aeb08ceeea6b1545b28bd8e105d9 100644 (file)
@@ -723,7 +723,6 @@ struct mwifiex_adapter {
        u16 cmd_wait_q_required;
        struct mwifiex_wait_queue cmd_wait_q;
        u8 scan_wait_q_woken;
-       struct cmd_ctrl_node *cmd_queued;
        spinlock_t queue_lock;          /* lock for tx queues */
        struct completion fw_load;
        u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
@@ -1018,7 +1017,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
                        struct mwifiex_multicast_list *mcast_list);
 int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
                            struct net_device *dev);
-int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter);
+int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
+                               struct cmd_ctrl_node *cmd_queued);
 int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
                      struct cfg80211_ssid *req_ssid);
 int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
index 5c395e2e6a2b874fb0257424663a73cb7abf0c17..feb20461339767df1c8912b216b9d62a1c11f5da 100644 (file)
@@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                }
                memcpy(adapter->upld_buf, skb->data,
                       min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
+               skb_push(skb, INTF_HEADER_LEN);
                if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
                                           PCI_DMA_FROMDEVICE))
                        return -1;
index bb60c2754a97ea54cc8c709206ba31a861e05d2e..d215b4d3c51b57db90eee2293f6c153059fe037b 100644 (file)
@@ -1388,10 +1388,13 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
                        list_del(&cmd_node->list);
                        spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
                                               flags);
-                       adapter->cmd_queued = cmd_node;
                        mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
                                                        true);
                        queue_work(adapter->workqueue, &adapter->main_work);
+
+                       /* Perform internal scan synchronously */
+                       if (!priv->scan_request)
+                               mwifiex_wait_queue_complete(adapter, cmd_node);
                } else {
                        spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
                                               flags);
@@ -1946,9 +1949,6 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
                /* Normal scan */
                ret = mwifiex_scan_networks(priv, NULL);
 
-       if (!ret)
-               ret = mwifiex_wait_queue_complete(priv->adapter);
-
        up(&priv->async_sem);
 
        return ret;
index 9f33c92c90f5b8dc32bf9685bd3b708171b66acc..13100f8de3db1729b613333f16d21438104cd2e0 100644 (file)
@@ -54,16 +54,10 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
  * This function waits on a cmd wait queue. It also cancels the pending
  * request after waking up, in case of errors.
  */
-int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
+int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
+                               struct cmd_ctrl_node *cmd_queued)
 {
        int status;
-       struct cmd_ctrl_node *cmd_queued;
-
-       if (!adapter->cmd_queued)
-               return 0;
-
-       cmd_queued = adapter->cmd_queued;
-       adapter->cmd_queued = NULL;
 
        dev_dbg(adapter->dev, "cmd pending\n");
        atomic_inc(&adapter->cmd_pending);
index 156b52732f3d576cb458bc3f6b3e2eb2818f2d90..5847d6d0881e7ce30180d26cc275425c55eda76b 100644 (file)
@@ -851,6 +851,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
        if (unlikely(!_urb)) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "Can't allocate urb. Drop skb!\n");
+               kfree_skb(skb);
                return;
        }
        _rtl_submit_tx_urb(hw, _urb);
index c689c04a4f523248e9b2712e77d7e041f5b074d7..2d2f0a43d36b409cf287f6d6629b506e517948a9 100644 (file)
@@ -620,7 +620,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
 
                /* special soc specific control */
                if (ctrl->mpp_get || ctrl->mpp_set) {
-                       if (!ctrl->name || !ctrl->mpp_set || !ctrl->mpp_set) {
+                       if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) {
                                dev_err(&pdev->dev, "wrong soc control info\n");
                                return -EINVAL;
                        }
index ac8d382a79bbfe43de8769ea43d1c13ab2fac036..d611ecfcbf70d8401946767f01833069ef60c431 100644 (file)
@@ -622,7 +622,7 @@ static const struct file_operations pinconf_dbg_pinname_fops = {
 static int pinconf_dbg_state_print(struct seq_file *s, void *d)
 {
        if (strlen(dbg_state_name))
-               seq_printf(s, "%s\n", dbg_pinname);
+               seq_printf(s, "%s\n", dbg_state_name);
        else
                seq_printf(s, "No pin state set\n");
        return 0;
index e3ed8cb072a5a4dd410015c66e3202ded90dd49c..bfda73d64eed527b2fce7e0a084b7459943bfb1e 100644 (file)
@@ -90,7 +90,7 @@ static inline void pinconf_init_device_debugfs(struct dentry *devroot,
  * pin config.
  */
 
-#ifdef CONFIG_GENERIC_PINCONF
+#if defined(CONFIG_GENERIC_PINCONF) && defined(CONFIG_DEBUG_FS)
 
 void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
                              struct seq_file *s, unsigned pin);
index caecdd37306126d7352b57365932b00b5e7ac1f2..c542a97c82f37cc743fa9d97e914c13dfe55aaa7 100644 (file)
@@ -422,7 +422,7 @@ static u8 abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
        }
 
        /* check if pin use AlternateFunction register */
-       if ((af.alt_bit1 == UNUSED) && (af.alt_bit1 == UNUSED))
+       if ((af.alt_bit1 == UNUSED) && (af.alt_bit2 == UNUSED))
                return mode;
        /*
         * if pin GPIOSEL bit is set and pin supports alternate function,
index 1a00658b3ea070f336333160411f1b01bbc8f96e..bd83c8b01cd102e7c0f194c38389bb4a083c5dec 100644 (file)
@@ -194,6 +194,11 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
        }
 
        if (!gpio_range) {
+               /*
+                * A pin should not be freed more times than allocated.
+                */
+               if (WARN_ON(!desc->mux_usecount))
+                       return NULL;
                desc->mux_usecount--;
                if (desc->mux_usecount)
                        return NULL;
index 5ac9c935c151511a1a9b42cf76dc5efd87c4f71f..e9b9c83928325fc708b72df751d51c08bf7bde82 100644 (file)
@@ -307,7 +307,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
        case EQC_WR_PROHIBIT:
                spin_lock_irqsave(&bdev->lock, flags);
                if (bdev->state != SCM_WR_PROHIBIT)
-                       pr_info("%lu: Write access to the SCM increment is suspended\n",
+                       pr_info("%lx: Write access to the SCM increment is suspended\n",
                                (unsigned long) bdev->scmdev->address);
                bdev->state = SCM_WR_PROHIBIT;
                spin_unlock_irqrestore(&bdev->lock, flags);
@@ -445,7 +445,7 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)
 
        spin_lock_irqsave(&bdev->lock, flags);
        if (bdev->state == SCM_WR_PROHIBIT)
-               pr_info("%lu: Write access to the SCM increment is restored\n",
+               pr_info("%lx: Write access to the SCM increment is restored\n",
                        (unsigned long) bdev->scmdev->address);
        bdev->state = SCM_OPER;
        spin_unlock_irqrestore(&bdev->lock, flags);
@@ -463,12 +463,15 @@ static int __init scm_blk_init(void)
                goto out;
 
        scm_major = ret;
-       if (scm_alloc_rqs(nr_requests))
+       ret = scm_alloc_rqs(nr_requests);
+       if (ret)
                goto out_unreg;
 
        scm_debug = debug_register("scm_log", 16, 1, 16);
-       if (!scm_debug)
+       if (!scm_debug) {
+               ret = -ENOMEM;
                goto out_free;
+       }
 
        debug_register_view(scm_debug, &debug_hex_ascii_view);
        debug_set_level(scm_debug, 2);
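
Both scm_blk_init() fixes are about the same thing: every failure path must leave a meaningful error code in ret before jumping to the unwind labels, instead of falling through with the stale value of a previous, successful step. Reduced to a standalone sketch (the step_*/undo_* names are placeholders, not driver functions):

#include <errno.h>

/* Placeholder steps standing in for the register/alloc/debug-setup calls. */
static int step_register(void)  { return 0; }
static int step_alloc(void)     { return 0; }
static int step_debug_ok(void)  { return 1; }	/* 0 models debug_register() returning NULL */
static void undo_alloc(void)    { }
static void undo_register(void) { }

static int init_sketch(void)
{
	int ret;

	ret = step_register();
	if (ret)
		goto out;

	ret = step_alloc();		/* first fix: capture the return value */
	if (ret)
		goto out_unreg;

	if (!step_debug_ok()) {
		ret = -ENOMEM;		/* second fix: don't return 0 on this path */
		goto out_free;
	}

	return 0;

out_free:
	undo_alloc();
out_unreg:
	undo_register();
out:
	return ret;
}

int main(void)
{
	return init_sketch() ? 1 : 0;
}
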
index 5f6180d6ff08f50516215c20553f0c1ce5f4da42..c98cf52d78d19331adea46fc12cff94034f25b6b 100644 (file)
@@ -19,7 +19,7 @@ static void scm_notify(struct scm_device *scmdev, enum scm_event event)
 
        switch (event) {
        case SCM_CHANGE:
-               pr_info("%lu: The capabilities of the SCM increment changed\n",
+               pr_info("%lx: The capabilities of the SCM increment changed\n",
                        (unsigned long) scmdev->address);
                SCM_LOG(2, "State changed");
                SCM_LOG_STATE(2, scmdev);
index b907dba24025e12239960f97feb0c5510d9d33cd..cee69dac3e182b6e2de94300b49ac9ec35658430 100644 (file)
@@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
        int i, rc;
 
        /* Check if the tty3270 is already there. */
-       view = raw3270_find_view(&tty3270_fn, tty->index);
+       view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
        if (!IS_ERR(view)) {
                tp = container_of(view, struct tty3270, view);
                tty->driver_data = tp;
@@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
                tp->inattr = TF_INPUT;
                return tty_port_install(&tp->port, driver, tty);
        }
-       if (tty3270_max_index < tty->index)
-               tty3270_max_index = tty->index;
+       if (tty3270_max_index < tty->index + 1)
+               tty3270_max_index = tty->index + 1;
 
        /* Allocate tty3270 structure on first open. */
        tp = tty3270_alloc_view();
        if (IS_ERR(tp))
                return PTR_ERR(tp);
 
-       rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
+       rc = raw3270_add_view(&tp->view, &tty3270_fn,
+                             tty->index + RAW3270_FIRSTMINOR);
        if (rc) {
                tty3270_free_view(tp);
                return rc;
@@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = {
 
 void tty3270_create_cb(int minor)
 {
-       tty_register_device(tty3270_driver, minor, NULL);
+       tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
 }
 
 void tty3270_destroy_cb(int minor)
 {
-       tty_unregister_device(tty3270_driver, minor);
+       tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
 }
 
 struct raw3270_notifier tty3270_notifier =
@@ -1884,7 +1885,8 @@ static int __init tty3270_init(void)
        driver->driver_name = "tty3270";
        driver->name = "3270/tty";
        driver->major = IBM_TTY3270_MAJOR;
-       driver->minor_start = 0;
+       driver->minor_start = RAW3270_FIRSTMINOR;
+       driver->name_base = RAW3270_FIRSTMINOR;
        driver->type = TTY_DRIVER_TYPE_SYSTEM;
        driver->subtype = SYSTEM_TYPE_TTY;
        driver->init_termios = tty_std_termios;
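
All of the tty3270 changes enforce a single convention: raw3270 views and the registered tty devices are keyed by minor number, which starts at RAW3270_FIRSTMINOR, while tty->index keeps counting from 0, so every crossing between the two spaces adds or subtracts that offset. Illustrative helpers (not from the patch) for the mapping:

/* tty->index counts from 0, raw3270 minors count from RAW3270_FIRSTMINOR;
 * the hunks above convert at each boundary between the two. */
static inline int tty3270_index_to_minor(int index)
{
	return index + RAW3270_FIRSTMINOR;
}

static inline int tty3270_minor_to_index(int minor)
{
	return minor - RAW3270_FIRSTMINOR;
}
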
index 2daf4b0da434c8711c4a7907916b1b8138c412f2..90bc7bd00966e9a87845bee59c1976a74d959fb3 100644 (file)
@@ -940,6 +940,7 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
        fc_exch_init(lport);
        fc_rport_init(lport);
        fc_disc_init(lport);
+       fc_disc_config(lport, lport);
        return 0;
 }
 
@@ -2133,6 +2134,7 @@ static int _bnx2fc_create(struct net_device *netdev,
        }
 
        ctlr = bnx2fc_to_ctlr(interface);
+       cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
        interface->vlan_id = vlan_id;
 
        interface->timer_work_queue =
@@ -2143,7 +2145,7 @@ static int _bnx2fc_create(struct net_device *netdev,
                goto ifput_err;
        }
 
-       lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0);
+       lport = bnx2fc_if_create(interface, &cdev->dev, 0);
        if (!lport) {
                printk(KERN_ERR PFX "Failed to create interface (%s)\n",
                        netdev->name);
@@ -2159,8 +2161,6 @@ static int _bnx2fc_create(struct net_device *netdev,
        /* Make this master N_port */
        ctlr->lp = lport;
 
-       cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
-
        if (link_state == BNX2FC_CREATE_LINK_UP)
                cdev->enabled = FCOE_CTLR_ENABLED;
        else
index b5d92fc93c7023f6d0f395c5ce44c4589afeabea..9bfdc9a3f897a2e16dc9e0a34d5d6e47969c0d1c 100644 (file)
@@ -490,7 +490,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 {
        struct net_device *netdev = fcoe->netdev;
        struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
-       struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
 
        rtnl_lock();
        if (!fcoe->removed)
@@ -501,7 +500,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
        /* tear-down the FCoE controller */
        fcoe_ctlr_destroy(fip);
        scsi_host_put(fip->lp->host);
-       fcoe_ctlr_device_delete(ctlr_dev);
        dev_put(netdev);
        module_put(THIS_MODULE);
 }
@@ -2194,6 +2192,8 @@ out_nodev:
  */
 static void fcoe_destroy_work(struct work_struct *work)
 {
+       struct fcoe_ctlr_device *cdev;
+       struct fcoe_ctlr *ctlr;
        struct fcoe_port *port;
        struct fcoe_interface *fcoe;
        struct Scsi_Host *shost;
@@ -2224,10 +2224,15 @@ static void fcoe_destroy_work(struct work_struct *work)
        mutex_lock(&fcoe_config_mutex);
 
        fcoe = port->priv;
+       ctlr = fcoe_to_ctlr(fcoe);
+       cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
        fcoe_if_destroy(port->lport);
        fcoe_interface_cleanup(fcoe);
 
        mutex_unlock(&fcoe_config_mutex);
+
+       fcoe_ctlr_device_delete(cdev);
 }
 
 /**
@@ -2335,7 +2340,9 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
                rc = -EIO;
                rtnl_unlock();
                fcoe_interface_cleanup(fcoe);
-               goto out_nortnl;
+               mutex_unlock(&fcoe_config_mutex);
+               fcoe_ctlr_device_delete(ctlr_dev);
+               goto out;
        }
 
        /* Make this the "master" N_Port */
@@ -2375,8 +2382,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
 
 out_nodev:
        rtnl_unlock();
-out_nortnl:
        mutex_unlock(&fcoe_config_mutex);
+out:
        return rc;
 }
 
index 08c3bc398da2c1b234dadb62a56f1e1b47da5500..a76247201be570987fe28b44545184bd777e65c0 100644 (file)
@@ -2814,6 +2814,47 @@ unlock:
                fc_lport_set_local_id(fip->lp, new_port_id);
 }
 
+/**
+ * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode
+ * @lport: The local port to be (re)configured
+ * @fip:   The FCoE controller whose mode is changing
+ * @fip_mode: The new fip mode
+ *
+ * Note that we shouldn't be changing the libfc discovery settings
+ * (fc_disc_config) while an lport is going through the libfc state
+ * machine. The mode can only be changed when a fcoe_ctlr device is
+ * disabled, so that should ensure that this routine is only called
+ * when nothing is happening.
+ */
+void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
+                       enum fip_state fip_mode)
+{
+       void *priv;
+
+       WARN_ON(lport->state != LPORT_ST_RESET &&
+               lport->state != LPORT_ST_DISABLED);
+
+       if (fip_mode == FIP_MODE_VN2VN) {
+               lport->rport_priv_size = sizeof(struct fcoe_rport);
+               lport->point_to_multipoint = 1;
+               lport->tt.disc_recv_req = fcoe_ctlr_disc_recv;
+               lport->tt.disc_start = fcoe_ctlr_disc_start;
+               lport->tt.disc_stop = fcoe_ctlr_disc_stop;
+               lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final;
+               priv = fip;
+       } else {
+               lport->rport_priv_size = 0;
+               lport->point_to_multipoint = 0;
+               lport->tt.disc_recv_req = NULL;
+               lport->tt.disc_start = NULL;
+               lport->tt.disc_stop = NULL;
+               lport->tt.disc_stop_final = NULL;
+               priv = lport;
+       }
+
+       fc_disc_config(lport, priv);
+}
+
 /**
  * fcoe_libfc_config() - Sets up libfc related properties for local port
  * @lport:    The local port to configure libfc for
@@ -2833,21 +2874,9 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
        fc_exch_init(lport);
        fc_elsct_init(lport);
        fc_lport_init(lport);
-       if (fip->mode == FIP_MODE_VN2VN)
-               lport->rport_priv_size = sizeof(struct fcoe_rport);
        fc_rport_init(lport);
-       if (fip->mode == FIP_MODE_VN2VN) {
-               lport->point_to_multipoint = 1;
-               lport->tt.disc_recv_req = fcoe_ctlr_disc_recv;
-               lport->tt.disc_start = fcoe_ctlr_disc_start;
-               lport->tt.disc_stop = fcoe_ctlr_disc_stop;
-               lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final;
-               mutex_init(&lport->disc.disc_mutex);
-               INIT_LIST_HEAD(&lport->disc.rports);
-               lport->disc.priv = fip;
-       } else {
-               fc_disc_init(lport);
-       }
+       fc_disc_init(lport);
+       fcoe_ctlr_mode_set(lport, fip, fip->mode);
        return 0;
 }
 EXPORT_SYMBOL_GPL(fcoe_libfc_config);
@@ -2875,6 +2904,7 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected);
 void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
 {
        struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+       struct fc_lport *lport = ctlr->lp;
 
        mutex_lock(&ctlr->ctlr_mutex);
        switch (ctlr_dev->mode) {
@@ -2888,5 +2918,7 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
        }
 
        mutex_unlock(&ctlr->ctlr_mutex);
+
+       fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode);
 }
 EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
index 8e561e6a557cd956188366488d5b4ed3dfe66021..880a9068ca1268baf851ea8804f3bdb4216049f6 100644 (file)
@@ -712,12 +712,13 @@ static void fc_disc_stop_final(struct fc_lport *lport)
 }
 
 /**
- * fc_disc_init() - Initialize the discovery layer for a local port
- * @lport: The local port that needs the discovery layer to be initialized
+ * fc_disc_config() - Configure the discovery layer for a local port
+ * @lport: The local port that needs the discovery layer to be configured
+ * @priv: Private data structure for users of the discovery layer
  */
-int fc_disc_init(struct fc_lport *lport)
+void fc_disc_config(struct fc_lport *lport, void *priv)
 {
-       struct fc_disc *disc;
+       struct fc_disc *disc = &lport->disc;
 
        if (!lport->tt.disc_start)
                lport->tt.disc_start = fc_disc_start;
@@ -732,12 +733,21 @@ int fc_disc_init(struct fc_lport *lport)
                lport->tt.disc_recv_req = fc_disc_recv_req;
 
        disc = &lport->disc;
+
+       disc->priv = priv;
+}
+EXPORT_SYMBOL(fc_disc_config);
+
+/**
+ * fc_disc_init() - Initialize the discovery layer for a local port
+ * @lport: The local port that needs the discovery layer to be initialized
+ */
+void fc_disc_init(struct fc_lport *lport)
+{
+       struct fc_disc *disc = &lport->disc;
+
        INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
        mutex_init(&disc->disc_mutex);
        INIT_LIST_HEAD(&disc->rports);
-
-       disc->priv = lport;
-
-       return 0;
 }
 EXPORT_SYMBOL(fc_disc_init);
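
With this split, fc_disc_init() keeps only the one-time setup (mutex, rport list, delayed work) while fc_disc_config() installs the discovery callbacks and the private pointer and can safely be run again later, which is what fcoe_ctlr_mode_set() does when the FIP mode changes on a disabled controller. A usage sketch assembled from calls visible in this patch (not verbatim driver code):

/* At attach time, as in the bnx2fc and fcoe_libfc_config() hunks: */
fc_disc_init(lport);			/* one-time: mutex, rport list, work */
fc_disc_config(lport, lport);		/* default: the lport is the disc priv */

/* Later, fcoe_ctlr_set_fip_mode() (only reachable with the ctlr disabled)
 * re-runs the configuration for the new mode: */
fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode);	/* fc_disc_config(lport, fip) for
						 * VN2VN, fc_disc_config(lport, lport)
						 * otherwise */
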
index f80eee74a3116c5a8935378d908fce904e3351f6..2be0de920d6756345de9cc78e04a7c1136ef9d4d 100644 (file)
@@ -55,6 +55,7 @@ comment "SPI Master Controller Drivers"
 
 config SPI_ALTERA
        tristate "Altera SPI Controller"
+       depends on GENERIC_HARDIRQS
        select SPI_BITBANG
        help
          This is the driver for the Altera SPI Controller.
@@ -310,7 +311,7 @@ config SPI_PXA2XX_DMA
 
 config SPI_PXA2XX
        tristate "PXA2xx SSP SPI master"
-       depends on ARCH_PXA || PCI || ACPI
+       depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS
        select PXA_SSP if ARCH_PXA
        help
          This enables using a PXA2xx or Sodaville SSP port as a SPI master
index 81a1fe661579d365ff82dfac8c0de06b53045110..71a73ec5af8d90811faebf7d087c705231a0a316 100644 (file)
@@ -1483,7 +1483,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
        case TRIG_NONE:
                /*  continous acquisition */
                devpriv->ai_continous = 1;
-               devpriv->ai_sample_count = 0;
+               devpriv->ai_sample_count = 1;
                break;
        }
 
index 73582705e8c5bc71081494a7e4481b7eb36d42e8..5c3714530961c5ede430f7745706d24c833781ce 100644 (file)
@@ -15,7 +15,7 @@ config RAMSTER
        depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y
        depends on NET
        # must ensure struct page is 8-byte aligned
-       select HAVE_ALIGNED_STRUCT_PAGE if !64_BIT
+       select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
        default n
        help
          RAMster allows RAM on other machines in a cluster to be utilized
index 2030b608136d3cfb7dc4509105ff061a3a621e14..3243ea790eaba9ee0db1b4152ac4e3450148ecd3 100644 (file)
@@ -1139,8 +1139,10 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
                return ret;
 
        ret = target_check_reservation(cmd);
-       if (ret)
+       if (ret) {
+               cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
                return ret;
+       }
 
        ret = dev->transport->parse_cdb(cmd);
        if (ret)
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
deleted file mode 100644 (file)
index cf6a538..0000000
+++ /dev/null
@@ -1,3448 +0,0 @@
-/*
- *  Driver for 8250/16550-type serial ports
- *
- *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
- *
- *  Copyright (C) 2001 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * A note about mapbase / membase
- *
- *  mapbase is the physical address of the IO port.
- *  membase is an 'ioremapped' cookie.
- */
-
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/tty.h>
-#include <linux/ratelimit.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_reg.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/serial_8250.h>
-#include <linux/nmi.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#ifdef CONFIG_SPARC
-#include <linux/sunserialcore.h>
-#endif
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#include "8250.h"
-
-/*
- * Configuration:
- *   share_irqs - whether we pass IRQF_SHARED to request_irq().  This option
- *                is unsafe when used on edge-triggered interrupts.
- */
-static unsigned int share_irqs = SERIAL8250_SHARE_IRQS;
-
-static unsigned int nr_uarts = CONFIG_SERIAL_8250_RUNTIME_UARTS;
-
-static struct uart_driver serial8250_reg;
-
-static int serial_index(struct uart_port *port)
-{
-       return (serial8250_reg.minor - 64) + port->line;
-}
-
-static unsigned int skip_txen_test; /* force skip of txen test at init time */
-
-/*
- * Debugging.
- */
-#if 0
-#define DEBUG_AUTOCONF(fmt...) printk(fmt)
-#else
-#define DEBUG_AUTOCONF(fmt...) do { } while (0)
-#endif
-
-#if 0
-#define DEBUG_INTR(fmt...)     printk(fmt)
-#else
-#define DEBUG_INTR(fmt...)     do { } while (0)
-#endif
-
-#define PASS_LIMIT     512
-
-#define BOTH_EMPTY     (UART_LSR_TEMT | UART_LSR_THRE)
-
-
-#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
-#define CONFIG_SERIAL_DETECT_IRQ 1
-#endif
-#ifdef CONFIG_SERIAL_8250_MANY_PORTS
-#define CONFIG_SERIAL_MANY_PORTS 1
-#endif
-
-/*
- * HUB6 is always on.  This will be removed once the header
- * files have been cleaned.
- */
-#define CONFIG_HUB6 1
-
-#include <asm/serial.h>
-/*
- * SERIAL_PORT_DFNS tells us about built-in ports that have no
- * standard enumeration mechanism.   Platforms that can find all
- * serial ports via mechanisms like ACPI or PCI need not supply it.
- */
-#ifndef SERIAL_PORT_DFNS
-#define SERIAL_PORT_DFNS
-#endif
-
-static const struct old_serial_port old_serial_port[] = {
-       SERIAL_PORT_DFNS /* defined in asm/serial.h */
-};
-
-#define UART_NR        CONFIG_SERIAL_8250_NR_UARTS
-
-#ifdef CONFIG_SERIAL_8250_RSA
-
-#define PORT_RSA_MAX 4
-static unsigned long probe_rsa[PORT_RSA_MAX];
-static unsigned int probe_rsa_count;
-#endif /* CONFIG_SERIAL_8250_RSA  */
-
-struct irq_info {
-       struct                  hlist_node node;
-       int                     irq;
-       spinlock_t              lock;   /* Protects list not the hash */
-       struct list_head        *head;
-};
-
-#define NR_IRQ_HASH            32      /* Can be adjusted later */
-static struct hlist_head irq_lists[NR_IRQ_HASH];
-static DEFINE_MUTEX(hash_mutex);       /* Used to walk the hash */
-
-/*
- * Here we define the default xmit fifo size used for each type of UART.
- */
-static const struct serial8250_config uart_config[] = {
-       [PORT_UNKNOWN] = {
-               .name           = "unknown",
-               .fifo_size      = 1,
-               .tx_loadsz      = 1,
-       },
-       [PORT_8250] = {
-               .name           = "8250",
-               .fifo_size      = 1,
-               .tx_loadsz      = 1,
-       },
-       [PORT_16450] = {
-               .name           = "16450",
-               .fifo_size      = 1,
-               .tx_loadsz      = 1,
-       },
-       [PORT_16550] = {
-               .name           = "16550",
-               .fifo_size      = 1,
-               .tx_loadsz      = 1,
-       },
-       [PORT_16550A] = {
-               .name           = "16550A",
-               .fifo_size      = 16,
-               .tx_loadsz      = 16,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO,
-       },
-       [PORT_CIRRUS] = {
-               .name           = "Cirrus",
-               .fifo_size      = 1,
-               .tx_loadsz      = 1,
-       },
-       [PORT_16650] = {
-               .name           = "ST16650",
-               .fifo_size      = 1,
-               .tx_loadsz      = 1,
-               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
-       },
-       [PORT_16650V2] = {
-               .name           = "ST16650V2",
-               .fifo_size      = 32,
-               .tx_loadsz      = 16,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
-                                 UART_FCR_T_TRIG_00,
-               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
-       },
-       [PORT_16750] = {
-               .name           = "TI16750",
-               .fifo_size      = 64,
-               .tx_loadsz      = 64,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
-                                 UART_FCR7_64BYTE,
-               .flags          = UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE,
-       },
-       [PORT_STARTECH] = {
-               .name           = "Startech",
-               .fifo_size      = 1,
-               .tx_loadsz      = 1,
-       },
-       [PORT_16C950] = {
-               .name           = "16C950/954",
-               .fifo_size      = 128,
-               .tx_loadsz      = 128,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               /* UART_CAP_EFR breaks billionon CF bluetooth card. */
-               .flags          = UART_CAP_FIFO | UART_CAP_SLEEP,
-       },
-       [PORT_16654] = {
-               .name           = "ST16654",
-               .fifo_size      = 64,
-               .tx_loadsz      = 32,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
-                                 UART_FCR_T_TRIG_10,
-               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
-       },
-       [PORT_16850] = {
-               .name           = "XR16850",
-               .fifo_size      = 128,
-               .tx_loadsz      = 128,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
-       },
-       [PORT_RSA] = {
-               .name           = "RSA",
-               .fifo_size      = 2048,
-               .tx_loadsz      = 2048,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11,
-               .flags          = UART_CAP_FIFO,
-       },
-       [PORT_NS16550A] = {
-               .name           = "NS16550A",
-               .fifo_size      = 16,
-               .tx_loadsz      = 16,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO | UART_NATSEMI,
-       },
-       [PORT_XSCALE] = {
-               .name           = "XScale",
-               .fifo_size      = 32,
-               .tx_loadsz      = 32,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
-       },
-       [PORT_OCTEON] = {
-               .name           = "OCTEON",
-               .fifo_size      = 64,
-               .tx_loadsz      = 64,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO,
-       },
-       [PORT_AR7] = {
-               .name           = "AR7",
-               .fifo_size      = 16,
-               .tx_loadsz      = 16,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
-               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
-       },
-       [PORT_U6_16550A] = {
-               .name           = "U6_16550A",
-               .fifo_size      = 64,
-               .tx_loadsz      = 64,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
-       },
-       [PORT_TEGRA] = {
-               .name           = "Tegra",
-               .fifo_size      = 32,
-               .tx_loadsz      = 8,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
-                                 UART_FCR_T_TRIG_01,
-               .flags          = UART_CAP_FIFO | UART_CAP_RTOIE,
-       },
-       [PORT_XR17D15X] = {
-               .name           = "XR17D15X",
-               .fifo_size      = 64,
-               .tx_loadsz      = 64,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
-                                 UART_CAP_SLEEP,
-       },
-       [PORT_XR17V35X] = {
-               .name           = "XR17V35X",
-               .fifo_size      = 256,
-               .tx_loadsz      = 256,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11 |
-                                 UART_FCR_T_TRIG_11,
-               .flags          = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
-                                 UART_CAP_SLEEP,
-       },
-       [PORT_LPC3220] = {
-               .name           = "LPC3220",
-               .fifo_size      = 64,
-               .tx_loadsz      = 32,
-               .fcr            = UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO |
-                                 UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
-               .flags          = UART_CAP_FIFO,
-       },
-       [PORT_BRCM_TRUMANAGE] = {
-               .name           = "TruManage",
-               .fifo_size      = 1,
-               .tx_loadsz      = 1024,
-               .flags          = UART_CAP_HFIFO,
-       },
-       [PORT_8250_CIR] = {
-               .name           = "CIR port"
-       },
-       [PORT_ALTR_16550_F32] = {
-               .name           = "Altera 16550 FIFO32",
-               .fifo_size      = 32,
-               .tx_loadsz      = 32,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
-       },
-       [PORT_ALTR_16550_F64] = {
-               .name           = "Altera 16550 FIFO64",
-               .fifo_size      = 64,
-               .tx_loadsz      = 64,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
-       },
-       [PORT_ALTR_16550_F128] = {
-               .name           = "Altera 16550 FIFO128",
-               .fifo_size      = 128,
-               .tx_loadsz      = 128,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
-       },
-};
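(The table above is indexed by port type; once a port's type is known, later code such as serial8250_clear_and_reinit_fifos() and autoconfig() pulls fifo_size, tx_loadsz, fcr and flags out of it. As a rough, self-contained sketch of that indexed-by-type lookup pattern — all names and values below are invented for illustration and are not the driver's own:

#include <stdio.h>

/* Hypothetical miniature of the uart_config[] idea: a static table indexed
 * by a port-type enum, consulted once a port's type has been identified. */
enum fake_port_type { FAKE_PORT_8250, FAKE_PORT_16550A, FAKE_NR_TYPES };

struct fake_uart_config {
	const char     *name;
	unsigned short  fifo_size;
	unsigned short  tx_loadsz;
};

static const struct fake_uart_config fake_config[FAKE_NR_TYPES] = {
	[FAKE_PORT_8250]   = { .name = "8250",   .fifo_size = 1,  .tx_loadsz = 1  },
	[FAKE_PORT_16550A] = { .name = "16550A", .fifo_size = 16, .tx_loadsz = 16 },
};

int main(void)
{
	enum fake_port_type t = FAKE_PORT_16550A;	/* e.g. the result of probing */

	printf("%s: fifo=%u loadsz=%u\n", fake_config[t].name,
	       fake_config[t].fifo_size, fake_config[t].tx_loadsz);
	return 0;
}
)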
-
-/* Uart divisor latch read */
-static int default_serial_dl_read(struct uart_8250_port *up)
-{
-       return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
-}
-
-/* Uart divisor latch write */
-static void default_serial_dl_write(struct uart_8250_port *up, int value)
-{
-       serial_out(up, UART_DLL, value & 0xff);
-       serial_out(up, UART_DLM, value >> 8 & 0xff);
-}
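(The two helpers above simply split a 16-bit baud divisor across the DLL (low byte) and DLM (high byte) latches and reassemble it on read. A minimal userspace sketch of the same packing, using an arbitrary example divisor — nothing here is driver API:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t divisor = 0x0183;		/* arbitrary example value */
	uint8_t  dll = divisor & 0xff;		/* what would go into UART_DLL */
	uint8_t  dlm = (divisor >> 8) & 0xff;	/* what would go into UART_DLM */
	uint16_t readback = dll | (uint16_t)(dlm << 8);

	printf("dll=0x%02x dlm=0x%02x readback=0x%04x\n", dll, dlm, readback);
	return readback == divisor ? 0 : 1;
}
)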
-
-#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
-
-/* Au1x00/RT288x UART hardware has a weird register layout */
-static const u8 au_io_in_map[] = {
-       [UART_RX]  = 0,
-       [UART_IER] = 2,
-       [UART_IIR] = 3,
-       [UART_LCR] = 5,
-       [UART_MCR] = 6,
-       [UART_LSR] = 7,
-       [UART_MSR] = 8,
-};
-
-static const u8 au_io_out_map[] = {
-       [UART_TX]  = 1,
-       [UART_IER] = 2,
-       [UART_FCR] = 4,
-       [UART_LCR] = 5,
-       [UART_MCR] = 6,
-};
-
-static unsigned int au_serial_in(struct uart_port *p, int offset)
-{
-       offset = au_io_in_map[offset] << p->regshift;
-       return __raw_readl(p->membase + offset);
-}
-
-static void au_serial_out(struct uart_port *p, int offset, int value)
-{
-       offset = au_io_out_map[offset] << p->regshift;
-       __raw_writel(value, p->membase + offset);
-}
-
-/* The Au1x00 doesn't have a standard divisor latch */
-static int au_serial_dl_read(struct uart_8250_port *up)
-{
-       return __raw_readl(up->port.membase + 0x28);
-}
-
-static void au_serial_dl_write(struct uart_8250_port *up, int value)
-{
-       __raw_writel(value, up->port.membase + 0x28);
-}
-
-#endif
-
-static unsigned int hub6_serial_in(struct uart_port *p, int offset)
-{
-       offset = offset << p->regshift;
-       outb(p->hub6 - 1 + offset, p->iobase);
-       return inb(p->iobase + 1);
-}
-
-static void hub6_serial_out(struct uart_port *p, int offset, int value)
-{
-       offset = offset << p->regshift;
-       outb(p->hub6 - 1 + offset, p->iobase);
-       outb(value, p->iobase + 1);
-}
-
-static unsigned int mem_serial_in(struct uart_port *p, int offset)
-{
-       offset = offset << p->regshift;
-       return readb(p->membase + offset);
-}
-
-static void mem_serial_out(struct uart_port *p, int offset, int value)
-{
-       offset = offset << p->regshift;
-       writeb(value, p->membase + offset);
-}
-
-static void mem32_serial_out(struct uart_port *p, int offset, int value)
-{
-       offset = offset << p->regshift;
-       writel(value, p->membase + offset);
-}
-
-static unsigned int mem32_serial_in(struct uart_port *p, int offset)
-{
-       offset = offset << p->regshift;
-       return readl(p->membase + offset);
-}
-
-static unsigned int io_serial_in(struct uart_port *p, int offset)
-{
-       offset = offset << p->regshift;
-       return inb(p->iobase + offset);
-}
-
-static void io_serial_out(struct uart_port *p, int offset, int value)
-{
-       offset = offset << p->regshift;
-       outb(value, p->iobase + offset);
-}
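(All of the accessors above scale the register offset by p->regshift, because some 8250 implementations space their registers on 2- or 4-byte boundaries rather than packing them byte by byte. A small sketch of that address arithmetic with assumed values — the base address and regshift here are made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int  regshift = 2;		/* assumed: registers on 4-byte boundaries */
	unsigned long base     = 0x1000;	/* assumed MMIO base, illustration only */
	int reg;

	for (reg = 0; reg < 8; reg++)
		printf("register %d -> address 0x%lx\n",
		       reg, base + ((unsigned long)reg << regshift));
	return 0;
}
)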
-
-static int serial8250_default_handle_irq(struct uart_port *port);
-static int exar_handle_irq(struct uart_port *port);
-
-static void set_io_from_upio(struct uart_port *p)
-{
-       struct uart_8250_port *up =
-               container_of(p, struct uart_8250_port, port);
-
-       up->dl_read = default_serial_dl_read;
-       up->dl_write = default_serial_dl_write;
-
-       switch (p->iotype) {
-       case UPIO_HUB6:
-               p->serial_in = hub6_serial_in;
-               p->serial_out = hub6_serial_out;
-               break;
-
-       case UPIO_MEM:
-               p->serial_in = mem_serial_in;
-               p->serial_out = mem_serial_out;
-               break;
-
-       case UPIO_MEM32:
-               p->serial_in = mem32_serial_in;
-               p->serial_out = mem32_serial_out;
-               break;
-
-#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
-       case UPIO_AU:
-               p->serial_in = au_serial_in;
-               p->serial_out = au_serial_out;
-               up->dl_read = au_serial_dl_read;
-               up->dl_write = au_serial_dl_write;
-               break;
-#endif
-
-       default:
-               p->serial_in = io_serial_in;
-               p->serial_out = io_serial_out;
-               break;
-       }
-       /* Remember loaded iotype */
-       up->cur_iotype = p->iotype;
-       p->handle_irq = serial8250_default_handle_irq;
-}
-
-static void
-serial_port_out_sync(struct uart_port *p, int offset, int value)
-{
-       switch (p->iotype) {
-       case UPIO_MEM:
-       case UPIO_MEM32:
-       case UPIO_AU:
-               p->serial_out(p, offset, value);
-               p->serial_in(p, UART_LCR);      /* safe, no side-effects */
-               break;
-       default:
-               p->serial_out(p, offset, value);
-       }
-}
-
-/*
- * For the 16C950
- */
-static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
-{
-       serial_out(up, UART_SCR, offset);
-       serial_out(up, UART_ICR, value);
-}
-
-static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
-{
-       unsigned int value;
-
-       serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
-       serial_out(up, UART_SCR, offset);
-       value = serial_in(up, UART_ICR);
-       serial_icr_write(up, UART_ACR, up->acr);
-
-       return value;
-}
-
-/*
- * FIFO support.
- */
-static void serial8250_clear_fifos(struct uart_8250_port *p)
-{
-       if (p->capabilities & UART_CAP_FIFO) {
-               serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
-               serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
-                              UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
-               serial_out(p, UART_FCR, 0);
-       }
-}
-
-void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
-{
-       unsigned char fcr;
-
-       serial8250_clear_fifos(p);
-       fcr = uart_config[p->port.type].fcr;
-       serial_out(p, UART_FCR, fcr);
-}
-EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos);
-
-/*
- * IER sleep support.  UARTs which have EFRs need the "extended
- * capability" bit enabled.  Note that on XR16C850s, we need to
- * reset LCR to write to IER.
- */
-static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
-{
-       /*
-        * Exar UARTs have a SLEEP register that enables or disables
-        * each UART to enter sleep mode separately.  On the XR17V35x the
-        * register is accessible to each UART at the UART_EXAR_SLEEP
-        * offset but the UART channel may only write to the corresponding
-        * bit.
-        */
-       if ((p->port.type == PORT_XR17V35X) ||
-          (p->port.type == PORT_XR17D15X)) {
-               serial_out(p, UART_EXAR_SLEEP, 0xff);
-               return;
-       }
-
-       if (p->capabilities & UART_CAP_SLEEP) {
-               if (p->capabilities & UART_CAP_EFR) {
-                       serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
-                       serial_out(p, UART_EFR, UART_EFR_ECB);
-                       serial_out(p, UART_LCR, 0);
-               }
-               serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
-               if (p->capabilities & UART_CAP_EFR) {
-                       serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
-                       serial_out(p, UART_EFR, 0);
-                       serial_out(p, UART_LCR, 0);
-               }
-       }
-}
-
-#ifdef CONFIG_SERIAL_8250_RSA
-/*
- * Attempts to turn on the RSA FIFO.  Returns zero on failure.
- * We set the port uart clock rate if we succeed.
- */
-static int __enable_rsa(struct uart_8250_port *up)
-{
-       unsigned char mode;
-       int result;
-
-       mode = serial_in(up, UART_RSA_MSR);
-       result = mode & UART_RSA_MSR_FIFO;
-
-       if (!result) {
-               serial_out(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
-               mode = serial_in(up, UART_RSA_MSR);
-               result = mode & UART_RSA_MSR_FIFO;
-       }
-
-       if (result)
-               up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16;
-
-       return result;
-}
-
-static void enable_rsa(struct uart_8250_port *up)
-{
-       if (up->port.type == PORT_RSA) {
-               if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
-                       spin_lock_irq(&up->port.lock);
-                       __enable_rsa(up);
-                       spin_unlock_irq(&up->port.lock);
-               }
-               if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
-                       serial_out(up, UART_RSA_FRR, 0);
-       }
-}
-
-/*
- * Attempts to turn off the RSA FIFO (unlike __enable_rsa(), this
- * function does not return a result).
- * It is unknown why interrupts were disabled here.  However,
- * the caller is expected to preserve this behaviour by grabbing
- * the spinlock before calling this function.
- */
-static void disable_rsa(struct uart_8250_port *up)
-{
-       unsigned char mode;
-       int result;
-
-       if (up->port.type == PORT_RSA &&
-           up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
-               spin_lock_irq(&up->port.lock);
-
-               mode = serial_in(up, UART_RSA_MSR);
-               result = !(mode & UART_RSA_MSR_FIFO);
-
-               if (!result) {
-                       serial_out(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
-                       mode = serial_in(up, UART_RSA_MSR);
-                       result = !(mode & UART_RSA_MSR_FIFO);
-               }
-
-               if (result)
-                       up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
-               spin_unlock_irq(&up->port.lock);
-       }
-}
-#endif /* CONFIG_SERIAL_8250_RSA */
-
-/*
- * This is a quickie test to see how big the FIFO is.
- * It doesn't work all the time, more's the pity.
- */
-static int size_fifo(struct uart_8250_port *up)
-{
-       unsigned char old_fcr, old_mcr, old_lcr;
-       unsigned short old_dl;
-       int count;
-
-       old_lcr = serial_in(up, UART_LCR);
-       serial_out(up, UART_LCR, 0);
-       old_fcr = serial_in(up, UART_FCR);
-       old_mcr = serial_in(up, UART_MCR);
-       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
-                   UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
-       serial_out(up, UART_MCR, UART_MCR_LOOP);
-       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-       old_dl = serial_dl_read(up);
-       serial_dl_write(up, 0x0001);
-       serial_out(up, UART_LCR, 0x03);
-       for (count = 0; count < 256; count++)
-               serial_out(up, UART_TX, count);
-       mdelay(20);/* FIXME - schedule_timeout */
-       for (count = 0; (serial_in(up, UART_LSR) & UART_LSR_DR) &&
-            (count < 256); count++)
-               serial_in(up, UART_RX);
-       serial_out(up, UART_FCR, old_fcr);
-       serial_out(up, UART_MCR, old_mcr);
-       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-       serial_dl_write(up, old_dl);
-       serial_out(up, UART_LCR, old_lcr);
-
-       return count;
-}
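(size_fifo() estimates the FIFO depth by putting the UART in loopback at the slowest divisor, writing 256 bytes and counting how many can be read back; bytes beyond the FIFO depth are simply lost. Below is a self-contained sketch of just that counting idea against a simulated loopback FIFO — the fake FIFO and its 64-byte depth are invented for illustration:

#include <stdio.h>

#define PROBE_BYTES 256

static int fifo_depth = 64;	/* the "unknown" hardware depth we want to discover */
static int fifo_fill;

/* A loopback FIFO silently drops writes once it is full, which is exactly
 * why the read-back count reveals its depth. */
static void loopback_write(unsigned char c)
{
	(void)c;
	if (fifo_fill < fifo_depth)
		fifo_fill++;
}

static int  loopback_data_ready(void) { return fifo_fill > 0; }
static void loopback_read(void)       { fifo_fill--; }

int main(void)
{
	int count;

	for (count = 0; count < PROBE_BYTES; count++)
		loopback_write((unsigned char)count);
	for (count = 0; loopback_data_ready() && count < PROBE_BYTES; count++)
		loopback_read();

	printf("detected FIFO size: %d\n", count);	/* prints 64 here */
	return 0;
}
)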
-
-/*
- * Read UART ID using the divisor method - set DLL and DLM to zero
- * and the revision will be in DLL and device type in DLM.  We
- * preserve the device state across this.
- */
-static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
-{
-       unsigned char old_dll, old_dlm, old_lcr;
-       unsigned int id;
-
-       old_lcr = serial_in(p, UART_LCR);
-       serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
-
-       old_dll = serial_in(p, UART_DLL);
-       old_dlm = serial_in(p, UART_DLM);
-
-       serial_out(p, UART_DLL, 0);
-       serial_out(p, UART_DLM, 0);
-
-       id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
-
-       serial_out(p, UART_DLL, old_dll);
-       serial_out(p, UART_DLM, old_dlm);
-       serial_out(p, UART_LCR, old_lcr);
-
-       return id;
-}
-
-/*
- * This is a helper routine to autodetect StarTech/Exar/Oxsemi UARTs.
- * When this function is called we know it is at least a StarTech
- * 16650 V2, but it might be one of several StarTech UARTs, or one of
- * its clones.  (We treat the broken original StarTech 16650 V1 as a
- * 16550, and why not?  Startech doesn't seem to even acknowledge its
- * existence.)
- *
- * What evil have men's minds wrought...
- */
-static void autoconfig_has_efr(struct uart_8250_port *up)
-{
-       unsigned int id1, id2, id3, rev;
-
-       /*
-        * Everything with an EFR has SLEEP
-        */
-       up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
-
-       /*
-        * First we check to see if it's an Oxford Semiconductor UART.
-        *
-        * We have to do this here because some non-National
-        * Semiconductor clone chips lock up if you try writing to the
-        * LSR register (which serial_icr_read does).
-        */
-
-       /*
-        * Check for Oxford Semiconductor 16C950.
-        *
-        * EFR [4] must be set else this test fails.
-        *
-        * This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca)
-        * claims that it's needed for 952 dual UARTs (which are not
-        * recommended for new designs).
-        */
-       up->acr = 0;
-       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-       serial_out(up, UART_EFR, UART_EFR_ECB);
-       serial_out(up, UART_LCR, 0x00);
-       id1 = serial_icr_read(up, UART_ID1);
-       id2 = serial_icr_read(up, UART_ID2);
-       id3 = serial_icr_read(up, UART_ID3);
-       rev = serial_icr_read(up, UART_REV);
-
-       DEBUG_AUTOCONF("950id=%02x:%02x:%02x:%02x ", id1, id2, id3, rev);
-
-       if (id1 == 0x16 && id2 == 0xC9 &&
-           (id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) {
-               up->port.type = PORT_16C950;
-
-               /*
-                * Enable work around for the Oxford Semiconductor 952 rev B
-                * chip which causes it to seriously miscalculate baud rates
-                * when DLL is 0.
-                */
-               if (id3 == 0x52 && rev == 0x01)
-                       up->bugs |= UART_BUG_QUOT;
-               return;
-       }
-
-       /*
-        * We check for a XR16C850 by setting DLL and DLM to 0, and then
-        * reading back DLL and DLM.  The chip type depends on the DLM
-        * value read back:
-        *  0x10 - XR16C850 and the DLL contains the chip revision.
-        *  0x12 - XR16C2850.
-        *  0x14 - XR16C854.
-        */
-       id1 = autoconfig_read_divisor_id(up);
-       DEBUG_AUTOCONF("850id=%04x ", id1);
-
-       id2 = id1 >> 8;
-       if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) {
-               up->port.type = PORT_16850;
-               return;
-       }
-
-       /*
-        * It wasn't an XR16C850.
-        *
-        * We distinguish between the '654 and the '650 by counting
-        * how many bytes are in the FIFO.  I'm using this for now,
-        * since that's the technique that was sent to me in the
-        * serial driver update, but I'm not convinced this works.
-        * I've had problems doing this in the past.  -TYT
-        */
-       if (size_fifo(up) == 64)
-               up->port.type = PORT_16654;
-       else
-               up->port.type = PORT_16650V2;
-}
-
-/*
- * We detected a chip without a FIFO.  Only two fall into
- * this category - the original 8250 and the 16450.  The
- * 16450 has a scratch register (accessible with LCR=0).
- */
-static void autoconfig_8250(struct uart_8250_port *up)
-{
-       unsigned char scratch, status1, status2;
-
-       up->port.type = PORT_8250;
-
-       scratch = serial_in(up, UART_SCR);
-       serial_out(up, UART_SCR, 0xa5);
-       status1 = serial_in(up, UART_SCR);
-       serial_out(up, UART_SCR, 0x5a);
-       status2 = serial_in(up, UART_SCR);
-       serial_out(up, UART_SCR, scratch);
-
-       if (status1 == 0xa5 && status2 == 0x5a)
-               up->port.type = PORT_16450;
-}
-
-static int broken_efr(struct uart_8250_port *up)
-{
-       /*
-        * Exar ST16C2550 "A2" devices incorrectly detect as
-        * having an EFR, and report an ID of 0x0201.  See
-        * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html 
-        */
-       if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
-               return 1;
-
-       return 0;
-}
-
-static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
-{
-       unsigned char status;
-
-       status = serial_in(up, 0x04); /* EXCR2 */
-#define PRESL(x) ((x) & 0x30)
-       if (PRESL(status) == 0x10) {
-               /* already in high speed mode */
-               return 0;
-       } else {
-               status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
-               status |= 0x10;  /* 1.625 divisor for baud_base --> 921600 */
-               serial_out(up, 0x04, status);
-       }
-       return 1;
-}
-
-/*
- * We know that the chip has FIFOs.  Does it have an EFR?  The
- * EFR is located in the same register position as the IIR and
- * we know the top two bits of the IIR are currently set.  The
- * EFR should contain zero.  Try to read the EFR.
- */
-static void autoconfig_16550a(struct uart_8250_port *up)
-{
-       unsigned char status1, status2;
-       unsigned int iersave;
-
-       up->port.type = PORT_16550A;
-       up->capabilities |= UART_CAP_FIFO;
-
-       /*
-        * XR17V35x UARTs have an extra divisor register, DLD,
-        * that gets enabled when DLAB is set, which will
-        * cause the device to incorrectly match and be assigned
-        * port type PORT_16650.  The EFR for this UART is
-        * found at offset 0x09.  Instead, check the Device ID (DVID)
-        * register for a 2-, 4- or 8-port UART.
-        */
-       if (up->port.flags & UPF_EXAR_EFR) {
-               status1 = serial_in(up, UART_EXAR_DVID);
-               if (status1 == 0x82 || status1 == 0x84 || status1 == 0x88) {
-                       DEBUG_AUTOCONF("Exar XR17V35x ");
-                       up->port.type = PORT_XR17V35X;
-                       up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
-                                               UART_CAP_SLEEP;
-
-                       return;
-               }
-
-       }
-
-       /*
-        * Check for presence of the EFR when DLAB is set.
-        * Only ST16C650V1 UARTs pass this test.
-        */
-       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-       if (serial_in(up, UART_EFR) == 0) {
-               serial_out(up, UART_EFR, 0xA8);
-               if (serial_in(up, UART_EFR) != 0) {
-                       DEBUG_AUTOCONF("EFRv1 ");
-                       up->port.type = PORT_16650;
-                       up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
-               } else {
-                       DEBUG_AUTOCONF("Motorola 8xxx DUART ");
-               }
-               serial_out(up, UART_EFR, 0);
-               return;
-       }
-
-       /*
-        * Maybe it requires 0xbf to be written to the LCR.
-        * (other ST16C650V2 UARTs, TI16C752A, etc)
-        */
-       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-       if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
-               DEBUG_AUTOCONF("EFRv2 ");
-               autoconfig_has_efr(up);
-               return;
-       }
-
-       /*
-        * Check for a National Semiconductor SuperIO chip.
-        * Attempt to switch to bank 2, read the value of the LOOP bit
-        * from EXCR1. Switch back to bank 0, change it in MCR. Then
-        * switch back to bank 2, read it from EXCR1 again and check
-        * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
-        */
-       serial_out(up, UART_LCR, 0);
-       status1 = serial_in(up, UART_MCR);
-       serial_out(up, UART_LCR, 0xE0);
-       status2 = serial_in(up, 0x02); /* EXCR1 */
-
-       if (!((status2 ^ status1) & UART_MCR_LOOP)) {
-               serial_out(up, UART_LCR, 0);
-               serial_out(up, UART_MCR, status1 ^ UART_MCR_LOOP);
-               serial_out(up, UART_LCR, 0xE0);
-               status2 = serial_in(up, 0x02); /* EXCR1 */
-               serial_out(up, UART_LCR, 0);
-               serial_out(up, UART_MCR, status1);
-
-               if ((status2 ^ status1) & UART_MCR_LOOP) {
-                       unsigned short quot;
-
-                       serial_out(up, UART_LCR, 0xE0);
-
-                       quot = serial_dl_read(up);
-                       quot <<= 3;
-
-                       if (ns16550a_goto_highspeed(up))
-                               serial_dl_write(up, quot);
-
-                       serial_out(up, UART_LCR, 0);
-
-                       up->port.uartclk = 921600*16;
-                       up->port.type = PORT_NS16550A;
-                       up->capabilities |= UART_NATSEMI;
-                       return;
-               }
-       }
-
-       /*
-        * No EFR.  Try to detect a TI16750, which only sets bit 5 of
-        * the IIR when 64 byte FIFO mode is enabled when DLAB is set.
-        * Try setting it with and without DLAB set.  Cheap clones
-        * set bit 5 without DLAB set.
-        */
-       serial_out(up, UART_LCR, 0);
-       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
-       status1 = serial_in(up, UART_IIR) >> 5;
-       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
-       status2 = serial_in(up, UART_IIR) >> 5;
-       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-       serial_out(up, UART_LCR, 0);
-
-       DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);
-
-       if (status1 == 6 && status2 == 7) {
-               up->port.type = PORT_16750;
-               up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
-               return;
-       }
-
-       /*
-        * Try writing and reading the UART_IER_UUE bit (b6).
-        * If it works, this is probably one of the Xscale platform's
-        * internal UARTs.
-        * We're going to explicitly set the UUE bit to 0 before
-        * trying to write and read a 1 just to make sure it's not
-        * already a 1 and maybe locked there before we even start.
-        */
-       iersave = serial_in(up, UART_IER);
-       serial_out(up, UART_IER, iersave & ~UART_IER_UUE);
-       if (!(serial_in(up, UART_IER) & UART_IER_UUE)) {
-               /*
-                * OK it's in a known zero state, try writing and reading
-                * without disturbing the current state of the other bits.
-                */
-               serial_out(up, UART_IER, iersave | UART_IER_UUE);
-               if (serial_in(up, UART_IER) & UART_IER_UUE) {
-                       /*
-                        * It's an Xscale.
-                        * We'll leave the UART_IER_UUE bit set to 1 (enabled).
-                        */
-                       DEBUG_AUTOCONF("Xscale ");
-                       up->port.type = PORT_XSCALE;
-                       up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE;
-                       return;
-               }
-       } else {
-               /*
-                * If we got here we couldn't force the IER_UUE bit to 0.
-                * Log it and continue.
-                */
-               DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 ");
-       }
-       serial_out(up, UART_IER, iersave);
-
-       /*
-        * Exar UARTs have the EFR in a weird location
-        */
-       if (up->port.flags & UPF_EXAR_EFR) {
-               DEBUG_AUTOCONF("Exar XR17D15x ");
-               up->port.type = PORT_XR17D15X;
-               up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
-                                   UART_CAP_SLEEP;
-
-               return;
-       }
-
-       /*
-        * We distinguish between 16550A and U6 16550A by counting
-        * how many bytes are in the FIFO.
-        */
-       if (up->port.type == PORT_16550A && size_fifo(up) == 64) {
-               up->port.type = PORT_U6_16550A;
-               up->capabilities |= UART_CAP_AFE;
-       }
-}
-
-/*
- * This routine is called by rs_init() to initialize a specific serial
- * port.  It determines what type of UART chip this serial port is
- * using: 8250, 16450, 16550, 16550A.  The important question is
- * whether or not this UART is a 16550A, since that determines
- * whether we can use its FIFO features.
- */
-static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
-{
-       unsigned char status1, scratch, scratch2, scratch3;
-       unsigned char save_lcr, save_mcr;
-       struct uart_port *port = &up->port;
-       unsigned long flags;
-       unsigned int old_capabilities;
-
-       if (!port->iobase && !port->mapbase && !port->membase)
-               return;
-
-       DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
-                      serial_index(port), port->iobase, port->membase);
-
-       /*
-        * We really do need global IRQs disabled here - we're going to
-        * be frobbing the chip's IRQ enable register to see if it exists.
-        */
-       spin_lock_irqsave(&port->lock, flags);
-
-       up->capabilities = 0;
-       up->bugs = 0;
-
-       if (!(port->flags & UPF_BUGGY_UART)) {
-               /*
-                * Do a simple existence test first; if we fail this,
-                * there's no point trying anything else.
-                *
-                * 0x80 is used as a nonsense port to guard against
-                * false positives due to ISA bus float.  The
-                * assumption is that 0x80 is a non-existent port,
-                * which should be safe since include/asm/io.h also
-                * makes this assumption.
-                *
-                * Note: this is safe as long as MCR bit 4 is clear
-                * and the device is in "PC" mode.
-                */
-               scratch = serial_in(up, UART_IER);
-               serial_out(up, UART_IER, 0);
-#ifdef __i386__
-               outb(0xff, 0x080);
-#endif
-               /*
-                * Mask out the IER[7:4] bits for the test, as some UARTs (e.g. TL
-                * 16C754B) only allow them to be modified if an EFR bit is set.
-                */
-               scratch2 = serial_in(up, UART_IER) & 0x0f;
-               serial_out(up, UART_IER, 0x0F);
-#ifdef __i386__
-               outb(0, 0x080);
-#endif
-               scratch3 = serial_in(up, UART_IER) & 0x0f;
-               serial_out(up, UART_IER, scratch);
-               if (scratch2 != 0 || scratch3 != 0x0F) {
-                       /*
-                        * We failed; there's nothing here
-                        */
-                       spin_unlock_irqrestore(&port->lock, flags);
-                       DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
-                                      scratch2, scratch3);
-                       goto out;
-               }
-       }
-
-       save_mcr = serial_in(up, UART_MCR);
-       save_lcr = serial_in(up, UART_LCR);
-
-       /*
-        * Check to see if a UART is really there.  Certain broken
-        * internal modems based on the Rockwell chipset fail this
-        * test, because they apparently don't implement the loopback
-        * test mode.  So this test is skipped on the COM 1 through
-        * COM 4 ports.  This *should* be safe, since no board
-        * manufacturer would be stupid enough to design a board
-        * that conflicts with COM 1-4 --- we hope!
-        */
-       if (!(port->flags & UPF_SKIP_TEST)) {
-               serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
-               status1 = serial_in(up, UART_MSR) & 0xF0;
-               serial_out(up, UART_MCR, save_mcr);
-               if (status1 != 0x90) {
-                       spin_unlock_irqrestore(&port->lock, flags);
-                       DEBUG_AUTOCONF("LOOP test failed (%02x) ",
-                                      status1);
-                       goto out;
-               }
-       }
-
-       /*
-        * We're pretty sure there's a port here.  Let's find out what
-        * type of port it is.  The top two bits of the IIR let us find
-        * out whether it's an 8250/16450, a 16550, or a 16550A or later.  This
-        * determines what we test for next.
-        *
-        * We also initialise the EFR (if any) to zero for later.  The
-        * EFR occupies the same register location as the FCR and IIR.
-        */
-       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-       serial_out(up, UART_EFR, 0);
-       serial_out(up, UART_LCR, 0);
-
-       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-       scratch = serial_in(up, UART_IIR) >> 6;
-
-       switch (scratch) {
-       case 0:
-               autoconfig_8250(up);
-               break;
-       case 1:
-               port->type = PORT_UNKNOWN;
-               break;
-       case 2:
-               port->type = PORT_16550;
-               break;
-       case 3:
-               autoconfig_16550a(up);
-               break;
-       }
-
-#ifdef CONFIG_SERIAL_8250_RSA
-       /*
-        * Only probe for RSA ports if we got the region.
-        */
-       if (port->type == PORT_16550A && probeflags & PROBE_RSA) {
-               int i;
-
-               for (i = 0 ; i < probe_rsa_count; ++i) {
-                       if (probe_rsa[i] == port->iobase && __enable_rsa(up)) {
-                               port->type = PORT_RSA;
-                               break;
-                       }
-               }
-       }
-#endif
-
-       serial_out(up, UART_LCR, save_lcr);
-
-       port->fifosize = uart_config[up->port.type].fifo_size;
-       old_capabilities = up->capabilities; 
-       up->capabilities = uart_config[port->type].flags;
-       up->tx_loadsz = uart_config[port->type].tx_loadsz;
-
-       if (port->type == PORT_UNKNOWN)
-               goto out_lock;
-
-       /*
-        * Reset the UART.
-        */
-#ifdef CONFIG_SERIAL_8250_RSA
-       if (port->type == PORT_RSA)
-               serial_out(up, UART_RSA_FRR, 0);
-#endif
-       serial_out(up, UART_MCR, save_mcr);
-       serial8250_clear_fifos(up);
-       serial_in(up, UART_RX);
-       if (up->capabilities & UART_CAP_UUE)
-               serial_out(up, UART_IER, UART_IER_UUE);
-       else
-               serial_out(up, UART_IER, 0);
-
-out_lock:
-       spin_unlock_irqrestore(&port->lock, flags);
-       if (up->capabilities != old_capabilities) {
-               printk(KERN_WARNING
-                      "ttyS%d: detected caps %08x should be %08x\n",
-                      serial_index(port), old_capabilities,
-                      up->capabilities);
-       }
-out:
-       DEBUG_AUTOCONF("iir=%d ", scratch);
-       DEBUG_AUTOCONF("type=%s\n", uart_config[port->type].name);
-}
-
-static void autoconfig_irq(struct uart_8250_port *up)
-{
-       struct uart_port *port = &up->port;
-       unsigned char save_mcr, save_ier;
-       unsigned char save_ICP = 0;
-       unsigned int ICP = 0;
-       unsigned long irqs;
-       int irq;
-
-       if (port->flags & UPF_FOURPORT) {
-               ICP = (port->iobase & 0xfe0) | 0x1f;
-               save_ICP = inb_p(ICP);
-               outb_p(0x80, ICP);
-               inb_p(ICP);
-       }
-
-       /* forget possible initially masked and pending IRQ */
-       probe_irq_off(probe_irq_on());
-       save_mcr = serial_in(up, UART_MCR);
-       save_ier = serial_in(up, UART_IER);
-       serial_out(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
-
-       irqs = probe_irq_on();
-       serial_out(up, UART_MCR, 0);
-       udelay(10);
-       if (port->flags & UPF_FOURPORT) {
-               serial_out(up, UART_MCR,
-                           UART_MCR_DTR | UART_MCR_RTS);
-       } else {
-               serial_out(up, UART_MCR,
-                           UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
-       }
-       serial_out(up, UART_IER, 0x0f); /* enable all intrs */
-       serial_in(up, UART_LSR);
-       serial_in(up, UART_RX);
-       serial_in(up, UART_IIR);
-       serial_in(up, UART_MSR);
-       serial_out(up, UART_TX, 0xFF);
-       udelay(20);
-       irq = probe_irq_off(irqs);
-
-       serial_out(up, UART_MCR, save_mcr);
-       serial_out(up, UART_IER, save_ier);
-
-       if (port->flags & UPF_FOURPORT)
-               outb_p(save_ICP, ICP);
-
-       port->irq = (irq > 0) ? irq : 0;
-}
-
-static inline void __stop_tx(struct uart_8250_port *p)
-{
-       if (p->ier & UART_IER_THRI) {
-               p->ier &= ~UART_IER_THRI;
-               serial_out(p, UART_IER, p->ier);
-       }
-}
-
-static void serial8250_stop_tx(struct uart_port *port)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-
-       __stop_tx(up);
-
-       /*
-        * We really want to stop the transmitter from sending.
-        */
-       if (port->type == PORT_16C950) {
-               up->acr |= UART_ACR_TXDIS;
-               serial_icr_write(up, UART_ACR, up->acr);
-       }
-}
-
-static void serial8250_start_tx(struct uart_port *port)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-
-       if (up->dma && !serial8250_tx_dma(up)) {
-               return;
-       } else if (!(up->ier & UART_IER_THRI)) {
-               up->ier |= UART_IER_THRI;
-               serial_port_out(port, UART_IER, up->ier);
-
-               if (up->bugs & UART_BUG_TXEN) {
-                       unsigned char lsr;
-                       lsr = serial_in(up, UART_LSR);
-                       up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
-                       if (lsr & UART_LSR_TEMT)
-                               serial8250_tx_chars(up);
-               }
-       }
-
-       /*
-        * Re-enable the transmitter if we disabled it.
-        */
-       if (port->type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
-               up->acr &= ~UART_ACR_TXDIS;
-               serial_icr_write(up, UART_ACR, up->acr);
-       }
-}
-
-static void serial8250_stop_rx(struct uart_port *port)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-
-       up->ier &= ~UART_IER_RLSI;
-       up->port.read_status_mask &= ~UART_LSR_DR;
-       serial_port_out(port, UART_IER, up->ier);
-}
-
-static void serial8250_enable_ms(struct uart_port *port)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-
-       /* no MSR capabilities */
-       if (up->bugs & UART_BUG_NOMSR)
-               return;
-
-       up->ier |= UART_IER_MSI;
-       serial_port_out(port, UART_IER, up->ier);
-}
-
-/*
- * serial8250_rx_chars: processes according to the passed-in LSR
- * value, and returns the remaining LSR bits not handled
- * by this Rx routine.
- */
-unsigned char
-serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
-{
-       struct uart_port *port = &up->port;
-       unsigned char ch;
-       int max_count = 256;
-       char flag;
-
-       do {
-               if (likely(lsr & UART_LSR_DR))
-                       ch = serial_in(up, UART_RX);
-               else
-                       /*
-                        * Intel 82571 has a Serial Over LAN device that will
-                        * set UART_LSR_BI without setting UART_LSR_DR when
-                        * it receives a break. To avoid reading from the
-                        * receive buffer without UART_LSR_DR bit set, we
-                        * just force the read character to be 0
-                        */
-                       ch = 0;
-
-               flag = TTY_NORMAL;
-               port->icount.rx++;
-
-               lsr |= up->lsr_saved_flags;
-               up->lsr_saved_flags = 0;
-
-               if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
-                       if (lsr & UART_LSR_BI) {
-                               lsr &= ~(UART_LSR_FE | UART_LSR_PE);
-                               port->icount.brk++;
-                               /*
-                                * We do the SysRQ and SAK checking
-                                * here because otherwise the break
-                                * may get masked by ignore_status_mask
-                                * or read_status_mask.
-                                */
-                               if (uart_handle_break(port))
-                                       goto ignore_char;
-                       } else if (lsr & UART_LSR_PE)
-                               port->icount.parity++;
-                       else if (lsr & UART_LSR_FE)
-                               port->icount.frame++;
-                       if (lsr & UART_LSR_OE)
-                               port->icount.overrun++;
-
-                       /*
-                        * Mask off conditions which should be ignored.
-                        */
-                       lsr &= port->read_status_mask;
-
-                       if (lsr & UART_LSR_BI) {
-                               DEBUG_INTR("handling break....");
-                               flag = TTY_BREAK;
-                       } else if (lsr & UART_LSR_PE)
-                               flag = TTY_PARITY;
-                       else if (lsr & UART_LSR_FE)
-                               flag = TTY_FRAME;
-               }
-               if (uart_handle_sysrq_char(port, ch))
-                       goto ignore_char;
-
-               uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
-
-ignore_char:
-               lsr = serial_in(up, UART_LSR);
-       } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
-       spin_unlock(&port->lock);
-       tty_flip_buffer_push(&port->state->port);
-       spin_lock(&port->lock);
-       return lsr;
-}
-EXPORT_SYMBOL_GPL(serial8250_rx_chars);
-
-void serial8250_tx_chars(struct uart_8250_port *up)
-{
-       struct uart_port *port = &up->port;
-       struct circ_buf *xmit = &port->state->xmit;
-       int count;
-
-       if (port->x_char) {
-               serial_out(up, UART_TX, port->x_char);
-               port->icount.tx++;
-               port->x_char = 0;
-               return;
-       }
-       if (uart_tx_stopped(port)) {
-               serial8250_stop_tx(port);
-               return;
-       }
-       if (uart_circ_empty(xmit)) {
-               __stop_tx(up);
-               return;
-       }
-
-       count = up->tx_loadsz;
-       do {
-               serial_out(up, UART_TX, xmit->buf[xmit->tail]);
-               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-               port->icount.tx++;
-               if (uart_circ_empty(xmit))
-                       break;
-               if (up->capabilities & UART_CAP_HFIFO) {
-                       if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
-                           BOTH_EMPTY)
-                               break;
-               }
-       } while (--count > 0);
-
-       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
-               uart_write_wakeup(port);
-
-       DEBUG_INTR("THRE...");
-
-       if (uart_circ_empty(xmit))
-               __stop_tx(up);
-}
-EXPORT_SYMBOL_GPL(serial8250_tx_chars);
-
-unsigned int serial8250_modem_status(struct uart_8250_port *up)
-{
-       struct uart_port *port = &up->port;
-       unsigned int status = serial_in(up, UART_MSR);
-
-       status |= up->msr_saved_flags;
-       up->msr_saved_flags = 0;
-       if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
-           port->state != NULL) {
-               if (status & UART_MSR_TERI)
-                       port->icount.rng++;
-               if (status & UART_MSR_DDSR)
-                       port->icount.dsr++;
-               if (status & UART_MSR_DDCD)
-                       uart_handle_dcd_change(port, status & UART_MSR_DCD);
-               if (status & UART_MSR_DCTS)
-                       uart_handle_cts_change(port, status & UART_MSR_CTS);
-
-               wake_up_interruptible(&port->state->port.delta_msr_wait);
-       }
-
-       return status;
-}
-EXPORT_SYMBOL_GPL(serial8250_modem_status);
-
-/*
- * This handles the interrupt from one port.
- */
-int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
-{
-       unsigned char status;
-       unsigned long flags;
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-       int dma_err = 0;
-
-       if (iir & UART_IIR_NO_INT)
-               return 0;
-
-       spin_lock_irqsave(&port->lock, flags);
-
-       status = serial_port_in(port, UART_LSR);
-
-       DEBUG_INTR("status = %x...", status);
-
-       if (status & (UART_LSR_DR | UART_LSR_BI)) {
-               if (up->dma)
-                       dma_err = serial8250_rx_dma(up, iir);
-
-               if (!up->dma || dma_err)
-                       status = serial8250_rx_chars(up, status);
-       }
-       serial8250_modem_status(up);
-       if (status & UART_LSR_THRE)
-               serial8250_tx_chars(up);
-
-       spin_unlock_irqrestore(&port->lock, flags);
-       return 1;
-}
-EXPORT_SYMBOL_GPL(serial8250_handle_irq);
-
-static int serial8250_default_handle_irq(struct uart_port *port)
-{
-       unsigned int iir = serial_port_in(port, UART_IIR);
-
-       return serial8250_handle_irq(port, iir);
-}
-
-/*
- * These Exar UARTs have an extra interrupt indicator that could
- * fire for a few unimplemented interrupts, one of which is a
- * wakeup event when coming out of sleep.  Put this here just
- * to make sure these interrupts don't go unhandled.
- */
-static int exar_handle_irq(struct uart_port *port)
-{
-       unsigned char int0, int1, int2, int3;
-       unsigned int iir = serial_port_in(port, UART_IIR);
-       int ret;
-
-       ret = serial8250_handle_irq(port, iir);
-
-       if ((port->type == PORT_XR17V35X) ||
-          (port->type == PORT_XR17D15X)) {
-               int0 = serial_port_in(port, 0x80);
-               int1 = serial_port_in(port, 0x81);
-               int2 = serial_port_in(port, 0x82);
-               int3 = serial_port_in(port, 0x83);
-       }
-
-       return ret;
-}
-
-/*
- * This is the serial driver's interrupt routine.
- *
- * Arjan thinks the old way was overly complex, so it got simplified.
- * Alan disagrees, saying that we need the complexity to handle the weird
- * nature of ISA shared interrupts.  (This is a special exception.)
- *
- * In order to handle ISA shared interrupts properly, we need to check
- * that all ports have been serviced, and therefore the ISA interrupt
- * line has been de-asserted.
- *
- * This means we need to loop through all ports, checking that they
- * don't have an interrupt pending.
- */
-static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
-{
-       struct irq_info *i = dev_id;
-       struct list_head *l, *end = NULL;
-       int pass_counter = 0, handled = 0;
-
-       DEBUG_INTR("serial8250_interrupt(%d)...", irq);
-
-       spin_lock(&i->lock);
-
-       l = i->head;
-       do {
-               struct uart_8250_port *up;
-               struct uart_port *port;
-
-               up = list_entry(l, struct uart_8250_port, list);
-               port = &up->port;
-
-               if (port->handle_irq(port)) {
-                       handled = 1;
-                       end = NULL;
-               } else if (end == NULL)
-                       end = l;
-
-               l = l->next;
-
-               if (l == i->head && pass_counter++ > PASS_LIMIT) {
-                       /* If we hit this, we're dead. */
-                       printk_ratelimited(KERN_ERR
-                               "serial8250: too much work for irq%d\n", irq);
-                       break;
-               }
-       } while (l != end);
-
-       spin_unlock(&i->lock);
-
-       DEBUG_INTR("end.\n");
-
-       return IRQ_RETVAL(handled);
-}
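(The termination condition above, while (l != end), implements the rule described in the comment: keep circling the ports that share the line, and only stop once a complete lap has passed since the last port that reported work, with PASS_LIMIT as a safety valve. A standalone sketch of that loop shape over a plain array instead of the driver's list — the ports, their pending counts and the MAX_PASSES value are all invented:

#include <stdio.h>

#define NPORTS     4
#define MAX_PASSES 256	/* analogous to the driver's PASS_LIMIT safety valve */

/* Fake per-port "pending work" counters standing in for port->handle_irq(). */
static int pending[NPORTS] = { 2, 0, 1, 0 };

static int handle_port(int i)
{
	if (pending[i] == 0)
		return 0;
	pending[i]--;
	return 1;
}

int main(void)
{
	int i = 0, end = -1, passes = 0, handled = 0;

	do {
		if (handle_port(i)) {
			handled = 1;
			end = -1;		/* work was done: a fresh quiet lap is needed */
		} else if (end < 0) {
			end = i;		/* first idle port of a potential quiet lap */
		}
		i = (i + 1) % NPORTS;
		if (i == 0 && passes++ > MAX_PASSES)
			break;			/* "too much work" bail-out */
	} while (i != end);

	printf("handled=%d\n", handled);
	return 0;
}
)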
-
-/*
- * To support ISA shared interrupts, we need to have one interrupt
- * handler that ensures that the IRQ line has been deasserted
- * before returning.  Failing to do this will result in the IRQ
- * line being stuck active, and, since ISA irqs are edge triggered,
- * no more IRQs will be seen.
- */
-static void serial_do_unlink(struct irq_info *i, struct uart_8250_port *up)
-{
-       spin_lock_irq(&i->lock);
-
-       if (!list_empty(i->head)) {
-               if (i->head == &up->list)
-                       i->head = i->head->next;
-               list_del(&up->list);
-       } else {
-               BUG_ON(i->head != &up->list);
-               i->head = NULL;
-       }
-       spin_unlock_irq(&i->lock);
-       /* List empty so throw away the hash node */
-       if (i->head == NULL) {
-               hlist_del(&i->node);
-               kfree(i);
-       }
-}
-
-static int serial_link_irq_chain(struct uart_8250_port *up)
-{
-       struct hlist_head *h;
-       struct hlist_node *n;
-       struct irq_info *i;
-       int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
-
-       mutex_lock(&hash_mutex);
-
-       h = &irq_lists[up->port.irq % NR_IRQ_HASH];
-
-       hlist_for_each(n, h) {
-               i = hlist_entry(n, struct irq_info, node);
-               if (i->irq == up->port.irq)
-                       break;
-       }
-
-       if (n == NULL) {
-               i = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
-               if (i == NULL) {
-                       mutex_unlock(&hash_mutex);
-                       return -ENOMEM;
-               }
-               spin_lock_init(&i->lock);
-               i->irq = up->port.irq;
-               hlist_add_head(&i->node, h);
-       }
-       mutex_unlock(&hash_mutex);
-
-       spin_lock_irq(&i->lock);
-
-       if (i->head) {
-               list_add(&up->list, i->head);
-               spin_unlock_irq(&i->lock);
-
-               ret = 0;
-       } else {
-               INIT_LIST_HEAD(&up->list);
-               i->head = &up->list;
-               spin_unlock_irq(&i->lock);
-               irq_flags |= up->port.irqflags;
-               ret = request_irq(up->port.irq, serial8250_interrupt,
-                                 irq_flags, "serial", i);
-               if (ret < 0)
-                       serial_do_unlink(i, up);
-       }
-
-       return ret;
-}
-
-static void serial_unlink_irq_chain(struct uart_8250_port *up)
-{
-       struct irq_info *i;
-       struct hlist_node *n;
-       struct hlist_head *h;
-
-       mutex_lock(&hash_mutex);
-
-       h = &irq_lists[up->port.irq % NR_IRQ_HASH];
-
-       hlist_for_each(n, h) {
-               i = hlist_entry(n, struct irq_info, node);
-               if (i->irq == up->port.irq)
-                       break;
-       }
-
-       BUG_ON(n == NULL);
-       BUG_ON(i->head == NULL);
-
-       if (list_empty(i->head))
-               free_irq(up->port.irq, i);
-
-       serial_do_unlink(i, up);
-       mutex_unlock(&hash_mutex);
-}
-
-/*
- * This function is used to handle ports that do not have an
- * interrupt.  This doesn't work very well for 16450s, but gives
- * barely passable results for a 16550A (although at the expense
- * of much CPU overhead).
- */
-static void serial8250_timeout(unsigned long data)
-{
-       struct uart_8250_port *up = (struct uart_8250_port *)data;
-
-       up->port.handle_irq(&up->port);
-       mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
-}
-
-static void serial8250_backup_timeout(unsigned long data)
-{
-       struct uart_8250_port *up = (struct uart_8250_port *)data;
-       unsigned int iir, ier = 0, lsr;
-       unsigned long flags;
-
-       spin_lock_irqsave(&up->port.lock, flags);
-
-       /*
-        * Must disable interrupts or else we risk racing with the interrupt
-        * based handler.
-        */
-       if (up->port.irq) {
-               ier = serial_in(up, UART_IER);
-               serial_out(up, UART_IER, 0);
-       }
-
-       iir = serial_in(up, UART_IIR);
-
-       /*
-        * This should be a safe test for anyone who doesn't trust the
-        * IIR bits on their UART, but it's specifically designed for
-        * the "Diva" UART used on the management processor on many HP
-        * ia64 and parisc boxes.
-        */
-       lsr = serial_in(up, UART_LSR);
-       up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
-       if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
-           (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
-           (lsr & UART_LSR_THRE)) {
-               iir &= ~(UART_IIR_ID | UART_IIR_NO_INT);
-               iir |= UART_IIR_THRI;
-       }
-
-       if (!(iir & UART_IIR_NO_INT))
-               serial8250_tx_chars(up);
-
-       if (up->port.irq)
-               serial_out(up, UART_IER, ier);
-
-       spin_unlock_irqrestore(&up->port.lock, flags);
-
-       /* Standard timer interval plus 0.2s to keep the port running */
-       mod_timer(&up->timer,
-               jiffies + uart_poll_timeout(&up->port) + HZ / 5);
-}
-
-static unsigned int serial8250_tx_empty(struct uart_port *port)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-       unsigned long flags;
-       unsigned int lsr;
-
-       spin_lock_irqsave(&port->lock, flags);
-       lsr = serial_port_in(port, UART_LSR);
-       up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
-       spin_unlock_irqrestore(&port->lock, flags);
-
-       return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
-}
-
-static unsigned int serial8250_get_mctrl(struct uart_port *port)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-       unsigned int status;
-       unsigned int ret;
-
-       status = serial8250_modem_status(up);
-
-       ret = 0;
-       if (status & UART_MSR_DCD)
-               ret |= TIOCM_CAR;
-       if (status & UART_MSR_RI)
-               ret |= TIOCM_RNG;
-       if (status & UART_MSR_DSR)
-               ret |= TIOCM_DSR;
-       if (status & UART_MSR_CTS)
-               ret |= TIOCM_CTS;
-       return ret;
-}
-
-static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-       unsigned char mcr = 0;
-
-       if (mctrl & TIOCM_RTS)
-               mcr |= UART_MCR_RTS;
-       if (mctrl & TIOCM_DTR)
-               mcr |= UART_MCR_DTR;
-       if (mctrl & TIOCM_OUT1)
-               mcr |= UART_MCR_OUT1;
-       if (mctrl & TIOCM_OUT2)
-               mcr |= UART_MCR_OUT2;
-       if (mctrl & TIOCM_LOOP)
-               mcr |= UART_MCR_LOOP;
-
-       mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
-
-       serial_port_out(port, UART_MCR, mcr);
-}
-
-static void serial8250_break_ctl(struct uart_port *port, int break_state)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-       unsigned long flags;
-
-       spin_lock_irqsave(&port->lock, flags);
-       if (break_state == -1)
-               up->lcr |= UART_LCR_SBC;
-       else
-               up->lcr &= ~UART_LCR_SBC;
-       serial_port_out(port, UART_LCR, up->lcr);
-       spin_unlock_irqrestore(&port->lock, flags);
-}
-
-/*
- *     Wait for transmitter & holding register to empty
- */
-static void wait_for_xmitr(struct uart_8250_port *up, int bits)
-{
-       unsigned int status, tmout = 10000;
-
-       /* Wait up to 10ms for the character(s) to be sent. */
-       for (;;) {
-               status = serial_in(up, UART_LSR);
-
-               up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
-
-               if ((status & bits) == bits)
-                       break;
-               if (--tmout == 0)
-                       break;
-               udelay(1);
-       }
-
-       /* Wait up to 1s for flow control if necessary */
-       if (up->port.flags & UPF_CONS_FLOW) {
-               unsigned int tmout;
-               for (tmout = 1000000; tmout; tmout--) {
-                       unsigned int msr = serial_in(up, UART_MSR);
-                       up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
-                       if (msr & UART_MSR_CTS)
-                               break;
-                       udelay(1);
-                       touch_nmi_watchdog();
-               }
-       }
-}
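(wait_for_xmitr() is a bounded busy-wait: poll the LSR until the requested bits (typically THRE, or THRE plus TEMT) are set, giving up after a fixed number of microsecond-spaced polls so console output can never hang forever. A hedged, self-contained sketch of the same pattern against a fake status source — the status function and poll counts are invented; only the 0x20/0x40 bit values mirror the standard 16550 LSR layout:

#include <stdio.h>

#define LSR_THRE   0x20			/* transmit-holding register empty */
#define LSR_TEMT   0x40			/* transmitter completely empty */
#define BOTH_EMPTY (LSR_THRE | LSR_TEMT)

/* Fake status source: reports the transmitter empty after a few polls,
 * standing in for serial_in(up, UART_LSR). */
static unsigned int fake_read_lsr(void)
{
	static int polls;
	return ++polls >= 5 ? BOTH_EMPTY : 0;
}

/* Poll until all requested bits are set or the budget is spent;
 * returns 1 on success, 0 on timeout. */
static int wait_for_bits(unsigned int bits, unsigned int tmout)
{
	while (tmout--) {
		if ((fake_read_lsr() & bits) == bits)
			return 1;
		/* a real driver would udelay(1) between polls here */
	}
	return 0;
}

int main(void)
{
	printf("tx empty: %s\n",
	       wait_for_bits(BOTH_EMPTY, 10000) ? "yes" : "timed out");
	return 0;
}
)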
-
-#ifdef CONFIG_CONSOLE_POLL
-/*
- * Console polling routines for writing and reading from the UART while
- * in an interrupt or debug context.
- */
-
-static int serial8250_get_poll_char(struct uart_port *port)
-{
-       unsigned char lsr = serial_port_in(port, UART_LSR);
-
-       if (!(lsr & UART_LSR_DR))
-               return NO_POLL_CHAR;
-
-       return serial_port_in(port, UART_RX);
-}
-
-
-static void serial8250_put_poll_char(struct uart_port *port,
-                        unsigned char c)
-{
-       unsigned int ier;
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-
-       /*
-        *      First save the IER, then disable the interrupts.
-        */
-       ier = serial_port_in(port, UART_IER);
-       if (up->capabilities & UART_CAP_UUE)
-               serial_port_out(port, UART_IER, UART_IER_UUE);
-       else
-               serial_port_out(port, UART_IER, 0);
-
-       wait_for_xmitr(up, BOTH_EMPTY);
-       /*
-        *      Send the character out.
-        *      If a LF, also do CR...
-        */
-       serial_port_out(port, UART_TX, c);
-       if (c == 10) {
-               wait_for_xmitr(up, BOTH_EMPTY);
-               serial_port_out(port, UART_TX, 13);
-       }
-
-       /*
-        *      Finally, wait for transmitter to become empty
-        *      and restore the IER
-        */
-       wait_for_xmitr(up, BOTH_EMPTY);
-       serial_port_out(port, UART_IER, ier);
-}
-
-#endif /* CONFIG_CONSOLE_POLL */
-
-static int serial8250_startup(struct uart_port *port)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-       unsigned long flags;
-       unsigned char lsr, iir;
-       int retval;
-
-       if (port->type == PORT_8250_CIR)
-               return -ENODEV;
-
-       if (!port->fifosize)
-               port->fifosize = uart_config[port->type].fifo_size;
-       if (!up->tx_loadsz)
-               up->tx_loadsz = uart_config[port->type].tx_loadsz;
-       if (!up->capabilities)
-               up->capabilities = uart_config[port->type].flags;
-       up->mcr = 0;
-
-       if (port->iotype != up->cur_iotype)
-               set_io_from_upio(port);
-
-       if (port->type == PORT_16C950) {
-               /* Wake up and initialize UART */
-               up->acr = 0;
-               serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
-               serial_port_out(port, UART_EFR, UART_EFR_ECB);
-               serial_port_out(port, UART_IER, 0);
-               serial_port_out(port, UART_LCR, 0);
-               serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
-               serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
-               serial_port_out(port, UART_EFR, UART_EFR_ECB);
-               serial_port_out(port, UART_LCR, 0);
-       }
-
-#ifdef CONFIG_SERIAL_8250_RSA
-       /*
-        * If this is an RSA port, see if we can kick it up to the
-        * higher speed clock.
-        */
-       enable_rsa(up);
-#endif
-
-       /*
-        * Clear the FIFO buffers and disable them.
-        * (they will be reenabled in set_termios())
-        */
-       serial8250_clear_fifos(up);
-
-       /*
-        * Clear the interrupt registers.
-        */
-       serial_port_in(port, UART_LSR);
-       serial_port_in(port, UART_RX);
-       serial_port_in(port, UART_IIR);
-       serial_port_in(port, UART_MSR);
-
-       /*
-        * At this point, there's no way the LSR could still be 0xff;
-        * if it is, then bail out, because there's likely no UART
-        * here.
-        */
-       if (!(port->flags & UPF_BUGGY_UART) &&
-           (serial_port_in(port, UART_LSR) == 0xff)) {
-               printk_ratelimited(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
-                                  serial_index(port));
-               return -ENODEV;
-       }
-
-       /*
-        * For a XR16C850, we need to set the trigger levels
-        */
-       if (port->type == PORT_16850) {
-               unsigned char fctr;
-
-               serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-
-               fctr = serial_in(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
-               serial_port_out(port, UART_FCTR,
-                               fctr | UART_FCTR_TRGD | UART_FCTR_RX);
-               serial_port_out(port, UART_TRG, UART_TRG_96);
-               serial_port_out(port, UART_FCTR,
-                               fctr | UART_FCTR_TRGD | UART_FCTR_TX);
-               serial_port_out(port, UART_TRG, UART_TRG_96);
-
-               serial_port_out(port, UART_LCR, 0);
-       }
-
-       if (port->irq) {
-               unsigned char iir1;
-               /*
-                * Test for UARTs that do not reassert THRE when the
-                * transmitter is idle and the interrupt has already
-                * been cleared.  Real 16550s should always reassert
-                * this interrupt whenever the transmitter is idle and
-                * the interrupt is enabled.  Delays are necessary to
-                * allow register changes to become visible.
-                */
-               spin_lock_irqsave(&port->lock, flags);
-               if (up->port.irqflags & IRQF_SHARED)
-                       disable_irq_nosync(port->irq);
-
-               wait_for_xmitr(up, UART_LSR_THRE);
-               serial_port_out_sync(port, UART_IER, UART_IER_THRI);
-               udelay(1); /* allow THRE to set */
-               iir1 = serial_port_in(port, UART_IIR);
-               serial_port_out(port, UART_IER, 0);
-               serial_port_out_sync(port, UART_IER, UART_IER_THRI);
-               udelay(1); /* allow a working UART time to re-assert THRE */
-               iir = serial_port_in(port, UART_IIR);
-               serial_port_out(port, UART_IER, 0);
-
-               if (port->irqflags & IRQF_SHARED)
-                       enable_irq(port->irq);
-               spin_unlock_irqrestore(&port->lock, flags);
-
-               /*
-                * If the interrupt is not reasserted, or we otherwise
-                * don't trust the iir, setup a timer to kick the UART
-                * on a regular basis.
-                */
-               if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) ||
-                   up->port.flags & UPF_BUG_THRE) {
-                       up->bugs |= UART_BUG_THRE;
-                       pr_debug("ttyS%d - using backup timer\n",
-                                serial_index(port));
-               }
-       }
-
-       /*
-        * The above check will only give an accurate result the first time
-        * the port is opened so this value needs to be preserved.
-        */
-       if (up->bugs & UART_BUG_THRE) {
-               up->timer.function = serial8250_backup_timeout;
-               up->timer.data = (unsigned long)up;
-               mod_timer(&up->timer, jiffies +
-                       uart_poll_timeout(port) + HZ / 5);
-       }
-
-       /*
-        * If the "interrupt" for this port doesn't correspond with any
-        * hardware interrupt, we use a timer-based system.  The original
-        * driver used to do this with IRQ0.
-        */
-       if (!port->irq) {
-               up->timer.data = (unsigned long)up;
-               mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
-       } else {
-               retval = serial_link_irq_chain(up);
-               if (retval)
-                       return retval;
-       }
-
-       /*
-        * Now, initialize the UART
-        */
-       serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
-
-       spin_lock_irqsave(&port->lock, flags);
-       if (up->port.flags & UPF_FOURPORT) {
-               if (!up->port.irq)
-                       up->port.mctrl |= TIOCM_OUT1;
-       } else
-               /*
-                * Most PC uarts need OUT2 raised to enable interrupts.
-                */
-               if (port->irq)
-                       up->port.mctrl |= TIOCM_OUT2;
-
-       serial8250_set_mctrl(port, port->mctrl);
-
-       /* Serial over LAN (SoL) hack:
-          Intel 8257x Gigabit ethernet chips have a
-          16550 emulation, to be used for Serial over LAN.
-          Those chips take longer than a normal serial
-          device to signal that transmit data has been
-          queued, so the above test generally fails.  One
-          solution would be to delay reading the iir, but
-          that is not reliable since the timeout is variable.
-          So just don't test whether we receive a TX irq;
-          this way, we'll never enable UART_BUG_TXEN.
-        */
-       if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
-               goto dont_test_tx_en;
-
-       /*
-        * Do a quick test to see if we receive an
-        * interrupt when we enable the TX irq.
-        */
-       serial_port_out(port, UART_IER, UART_IER_THRI);
-       lsr = serial_port_in(port, UART_LSR);
-       iir = serial_port_in(port, UART_IIR);
-       serial_port_out(port, UART_IER, 0);
-
-       if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
-               if (!(up->bugs & UART_BUG_TXEN)) {
-                       up->bugs |= UART_BUG_TXEN;
-                       pr_debug("ttyS%d - enabling bad tx status workarounds\n",
-                                serial_index(port));
-               }
-       } else {
-               up->bugs &= ~UART_BUG_TXEN;
-       }
-
-dont_test_tx_en:
-       spin_unlock_irqrestore(&port->lock, flags);
-
-       /*
-        * Clear the interrupt registers again for luck, and clear the
-        * saved flags to avoid getting false values from polling
-        * routines or the previous session.
-        */
-       serial_port_in(port, UART_LSR);
-       serial_port_in(port, UART_RX);
-       serial_port_in(port, UART_IIR);
-       serial_port_in(port, UART_MSR);
-       up->lsr_saved_flags = 0;
-       up->msr_saved_flags = 0;
-
-       /*
-        * Request DMA channels for both RX and TX.
-        */
-       if (up->dma) {
-               retval = serial8250_request_dma(up);
-               if (retval) {
-                       pr_warn_ratelimited("ttyS%d - failed to request DMA\n",
-                                           serial_index(port));
-                       up->dma = NULL;
-               }
-       }
-
-       /*
-        * Finally, enable interrupts.  Note: Modem status interrupts
-        * are set via set_termios(), which will be occurring imminently
-        * anyway, so we don't enable them here.
-        */
-       up->ier = UART_IER_RLSI | UART_IER_RDI;
-       serial_port_out(port, UART_IER, up->ier);
-
-       if (port->flags & UPF_FOURPORT) {
-               unsigned int icp;
-               /*
-                * Enable interrupts on the AST Fourport board
-                */
-               icp = (port->iobase & 0xfe0) | 0x01f;
-               outb_p(0x80, icp);
-               inb_p(icp);
-       }
-
-       return 0;
-}
-
-static void serial8250_shutdown(struct uart_port *port)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-       unsigned long flags;
-
-       /*
-        * Disable interrupts from this port
-        */
-       up->ier = 0;
-       serial_port_out(port, UART_IER, 0);
-
-       if (up->dma)
-               serial8250_release_dma(up);
-
-       spin_lock_irqsave(&port->lock, flags);
-       if (port->flags & UPF_FOURPORT) {
-               /* reset interrupts on the AST Fourport board */
-               inb((port->iobase & 0xfe0) | 0x1f);
-               port->mctrl |= TIOCM_OUT1;
-       } else
-               port->mctrl &= ~TIOCM_OUT2;
-
-       serial8250_set_mctrl(port, port->mctrl);
-       spin_unlock_irqrestore(&port->lock, flags);
-
-       /*
-        * Disable break condition and FIFOs
-        */
-       serial_port_out(port, UART_LCR,
-                       serial_port_in(port, UART_LCR) & ~UART_LCR_SBC);
-       serial8250_clear_fifos(up);
-
-#ifdef CONFIG_SERIAL_8250_RSA
-       /*
-        * Reset the RSA board back to 115kbps compat mode.
-        */
-       disable_rsa(up);
-#endif
-
-       /*
-        * Read data port to reset things, and then unlink from
-        * the IRQ chain.
-        */
-       serial_port_in(port, UART_RX);
-
-       del_timer_sync(&up->timer);
-       up->timer.function = serial8250_timeout;
-       if (port->irq)
-               serial_unlink_irq_chain(up);
-}
-
-static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int baud)
-{
-       unsigned int quot;
-
-       /*
-        * Handle magic divisors for baud rates above baud_base on
-        * SMSC SuperIO chips.
-        */
-       if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
-           baud == (port->uartclk/4))
-               quot = 0x8001;
-       else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
-                baud == (port->uartclk/8))
-               quot = 0x8002;
-       else
-               quot = uart_get_divisor(port, baud);
-
-       return quot;
-}
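/*
 * A minimal standalone sketch of the non-magic path above: the classic
 * 16550 divisor is uartclk / (16 * baud), rounded to the nearest integer
 * (the magic 0x8001/0x8002 values are SMSC-specific and not modelled
 * here).  The function name and sample values are illustrative only.
 */
#include <stdio.h>

static unsigned int example_divisor(unsigned int uartclk, unsigned int baud)
{
        /* Nearest-integer rounding: (clk + 8 * baud) / (16 * baud). */
        return (uartclk + 8 * baud) / (16 * baud);
}

int main(void)
{
        /* The common 1.8432 MHz UART clock at 115200 baud gives divisor 1. */
        printf("%u\n", example_divisor(1843200, 115200));
        return 0;
}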
-
-void
-serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
-                         struct ktermios *old)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-       unsigned char cval, fcr = 0;
-       unsigned long flags;
-       unsigned int baud, quot;
-       int fifo_bug = 0;
-
-       switch (termios->c_cflag & CSIZE) {
-       case CS5:
-               cval = UART_LCR_WLEN5;
-               break;
-       case CS6:
-               cval = UART_LCR_WLEN6;
-               break;
-       case CS7:
-               cval = UART_LCR_WLEN7;
-               break;
-       default:
-       case CS8:
-               cval = UART_LCR_WLEN8;
-               break;
-       }
-
-       if (termios->c_cflag & CSTOPB)
-               cval |= UART_LCR_STOP;
-       if (termios->c_cflag & PARENB) {
-               cval |= UART_LCR_PARITY;
-               if (up->bugs & UART_BUG_PARITY)
-                       fifo_bug = 1;
-       }
-       if (!(termios->c_cflag & PARODD))
-               cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
-       if (termios->c_cflag & CMSPAR)
-               cval |= UART_LCR_SPAR;
-#endif
-
-       /*
-        * Ask the core to calculate the divisor for us.
-        */
-       baud = uart_get_baud_rate(port, termios, old,
-                                 port->uartclk / 16 / 0xffff,
-                                 port->uartclk / 16);
-       quot = serial8250_get_divisor(port, baud);
-
-       /*
-        * Oxford Semi 952 rev B workaround
-        */
-       if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
-               quot++;
-
-       if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
-               fcr = uart_config[port->type].fcr;
-               if (baud < 2400 || fifo_bug) {
-                       fcr &= ~UART_FCR_TRIGGER_MASK;
-                       fcr |= UART_FCR_TRIGGER_1;
-               }
-       }
-
-       /*
-        * MCR-based auto flow control.  When AFE is enabled, RTS will be
-        * deasserted when the receive FIFO contains more characters than
-        * the trigger, or the MCR RTS bit is cleared.  In the case where
-        * the remote UART is not using CTS auto flow control, we must
-        * have sufficient FIFO entries for the latency of the remote
-        * UART to respond.  IOW, at least 32 bytes of FIFO.
-        */
-       if (up->capabilities & UART_CAP_AFE && port->fifosize >= 32) {
-               up->mcr &= ~UART_MCR_AFE;
-               if (termios->c_cflag & CRTSCTS)
-                       up->mcr |= UART_MCR_AFE;
-       }
-
-       /*
-        * Ok, we're now changing the port state.  Do it with
-        * interrupts disabled.
-        */
-       spin_lock_irqsave(&port->lock, flags);
-
-       /*
-        * Update the per-port timeout.
-        */
-       uart_update_timeout(port, termios->c_cflag, baud);
-
-       port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
-       if (termios->c_iflag & INPCK)
-               port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
-               port->read_status_mask |= UART_LSR_BI;
-
-       /*
-        * Characters to ignore
-        */
-       port->ignore_status_mask = 0;
-       if (termios->c_iflag & IGNPAR)
-               port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
-       if (termios->c_iflag & IGNBRK) {
-               port->ignore_status_mask |= UART_LSR_BI;
-               /*
-                * If we're ignoring parity and break indicators,
-                * ignore overruns too (for real raw support).
-                */
-               if (termios->c_iflag & IGNPAR)
-                       port->ignore_status_mask |= UART_LSR_OE;
-       }
-
-       /*
-        * ignore all characters if CREAD is not set
-        */
-       if ((termios->c_cflag & CREAD) == 0)
-               port->ignore_status_mask |= UART_LSR_DR;
-
-       /*
-        * CTS flow control flag and modem status interrupts
-        */
-       up->ier &= ~UART_IER_MSI;
-       if (!(up->bugs & UART_BUG_NOMSR) &&
-                       UART_ENABLE_MS(&up->port, termios->c_cflag))
-               up->ier |= UART_IER_MSI;
-       if (up->capabilities & UART_CAP_UUE)
-               up->ier |= UART_IER_UUE;
-       if (up->capabilities & UART_CAP_RTOIE)
-               up->ier |= UART_IER_RTOIE;
-
-       serial_port_out(port, UART_IER, up->ier);
-
-       if (up->capabilities & UART_CAP_EFR) {
-               unsigned char efr = 0;
-               /*
-                * TI16C752/Startech hardware flow control.  FIXME:
-                * - TI16C752 requires control thresholds to be set.
-                * - UART_MCR_RTS is ineffective if auto-RTS mode is enabled.
-                */
-               if (termios->c_cflag & CRTSCTS)
-                       efr |= UART_EFR_CTS;
-
-               serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
-               if (port->flags & UPF_EXAR_EFR)
-                       serial_port_out(port, UART_XR_EFR, efr);
-               else
-                       serial_port_out(port, UART_EFR, efr);
-       }
-
-       /* Workaround to enable 115200 baud on OMAP1510 internal ports */
-       if (is_omap1510_8250(up)) {
-               if (baud == 115200) {
-                       quot = 1;
-                       serial_port_out(port, UART_OMAP_OSC_12M_SEL, 1);
-               } else
-                       serial_port_out(port, UART_OMAP_OSC_12M_SEL, 0);
-       }
-
-       /*
-        * For NatSemi, switch to bank 2 not bank 1, to avoid resetting EXCR2,
-        * otherwise just set DLAB
-        */
-       if (up->capabilities & UART_NATSEMI)
-               serial_port_out(port, UART_LCR, 0xe0);
-       else
-               serial_port_out(port, UART_LCR, cval | UART_LCR_DLAB);
-
-       serial_dl_write(up, quot);
-
-       /*
-        * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
-        * is written without DLAB set, this mode will be disabled.
-        */
-       if (port->type == PORT_16750)
-               serial_port_out(port, UART_FCR, fcr);
-
-       serial_port_out(port, UART_LCR, cval);          /* reset DLAB */
-       up->lcr = cval;                                 /* Save LCR */
-       if (port->type != PORT_16750) {
-               /* emulated UARTs (Lucent Venus 167x) need two steps */
-               if (fcr & UART_FCR_ENABLE_FIFO)
-                       serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
-               serial_port_out(port, UART_FCR, fcr);           /* set fcr */
-       }
-       serial8250_set_mctrl(port, port->mctrl);
-       spin_unlock_irqrestore(&port->lock, flags);
-       /* Don't rewrite B0 */
-       if (tty_termios_baud_rate(termios))
-               tty_termios_encode_baud_rate(termios, baud, baud);
-}
-EXPORT_SYMBOL(serial8250_do_set_termios);
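/*
 * A minimal standalone sketch of the divisor-load step above (DLAB set via
 * LCR to expose the divisor latches, DLL/DLM written, LCR restored).
 * Register offsets follow the standard 16550 map; the regs[] array and
 * names are illustrative stand-ins for real bus accesses.
 */
#include <stdio.h>

#define EX_UART_DLL             0       /* divisor latch low (DLAB = 1)  */
#define EX_UART_DLM             1       /* divisor latch high (DLAB = 1) */
#define EX_UART_LCR             3       /* line control register         */
#define EX_UART_LCR_DLAB        0x80    /* divisor latch access bit      */

static void example_load_divisor(unsigned char regs[8], unsigned char lcr,
                                 unsigned short quot)
{
        regs[EX_UART_LCR] = lcr | EX_UART_LCR_DLAB;     /* expose DLL/DLM */
        regs[EX_UART_DLL] = quot & 0xff;
        regs[EX_UART_DLM] = quot >> 8;
        regs[EX_UART_LCR] = lcr;        /* restore word format, clear DLAB */
}

int main(void)
{
        unsigned char regs[8] = { 0 };

        example_load_divisor(regs, 0x03 /* 8N1 */, 1 /* 115200 @ 1.8432 MHz */);
        printf("DLL=%u DLM=%u LCR=0x%02x\n",
               regs[EX_UART_DLL], regs[EX_UART_DLM], regs[EX_UART_LCR]);
        return 0;
}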
-
-static void
-serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
-                      struct ktermios *old)
-{
-       if (port->set_termios)
-               port->set_termios(port, termios, old);
-       else
-               serial8250_do_set_termios(port, termios, old);
-}
-
-static void
-serial8250_set_ldisc(struct uart_port *port, int new)
-{
-       if (new == N_PPS) {
-               port->flags |= UPF_HARDPPS_CD;
-               serial8250_enable_ms(port);
-       } else
-               port->flags &= ~UPF_HARDPPS_CD;
-}
-
-
-void serial8250_do_pm(struct uart_port *port, unsigned int state,
-                     unsigned int oldstate)
-{
-       struct uart_8250_port *p =
-               container_of(port, struct uart_8250_port, port);
-
-       serial8250_set_sleep(p, state != 0);
-}
-EXPORT_SYMBOL(serial8250_do_pm);
-
-static void
-serial8250_pm(struct uart_port *port, unsigned int state,
-             unsigned int oldstate)
-{
-       if (port->pm)
-               port->pm(port, state, oldstate);
-       else
-               serial8250_do_pm(port, state, oldstate);
-}
-
-static unsigned int serial8250_port_size(struct uart_8250_port *pt)
-{
-       if (pt->port.iotype == UPIO_AU)
-               return 0x1000;
-       if (is_omap1_8250(pt))
-               return 0x16 << pt->port.regshift;
-
-       return 8 << pt->port.regshift;
-}
-
-/*
- * Resource handling.
- */
-static int serial8250_request_std_resource(struct uart_8250_port *up)
-{
-       unsigned int size = serial8250_port_size(up);
-       struct uart_port *port = &up->port;
-       int ret = 0;
-
-       switch (port->iotype) {
-       case UPIO_AU:
-       case UPIO_TSI:
-       case UPIO_MEM32:
-       case UPIO_MEM:
-               if (!port->mapbase)
-                       break;
-
-               if (!request_mem_region(port->mapbase, size, "serial")) {
-                       ret = -EBUSY;
-                       break;
-               }
-
-               if (port->flags & UPF_IOREMAP) {
-                       port->membase = ioremap_nocache(port->mapbase, size);
-                       if (!port->membase) {
-                               release_mem_region(port->mapbase, size);
-                               ret = -ENOMEM;
-                       }
-               }
-               break;
-
-       case UPIO_HUB6:
-       case UPIO_PORT:
-               if (!request_region(port->iobase, size, "serial"))
-                       ret = -EBUSY;
-               break;
-       }
-       return ret;
-}
-
-static void serial8250_release_std_resource(struct uart_8250_port *up)
-{
-       unsigned int size = serial8250_port_size(up);
-       struct uart_port *port = &up->port;
-
-       switch (port->iotype) {
-       case UPIO_AU:
-       case UPIO_TSI:
-       case UPIO_MEM32:
-       case UPIO_MEM:
-               if (!port->mapbase)
-                       break;
-
-               if (port->flags & UPF_IOREMAP) {
-                       iounmap(port->membase);
-                       port->membase = NULL;
-               }
-
-               release_mem_region(port->mapbase, size);
-               break;
-
-       case UPIO_HUB6:
-       case UPIO_PORT:
-               release_region(port->iobase, size);
-               break;
-       }
-}
-
-static int serial8250_request_rsa_resource(struct uart_8250_port *up)
-{
-       unsigned long start = UART_RSA_BASE << up->port.regshift;
-       unsigned int size = 8 << up->port.regshift;
-       struct uart_port *port = &up->port;
-       int ret = -EINVAL;
-
-       switch (port->iotype) {
-       case UPIO_HUB6:
-       case UPIO_PORT:
-               start += port->iobase;
-               if (request_region(start, size, "serial-rsa"))
-                       ret = 0;
-               else
-                       ret = -EBUSY;
-               break;
-       }
-
-       return ret;
-}
-
-static void serial8250_release_rsa_resource(struct uart_8250_port *up)
-{
-       unsigned long offset = UART_RSA_BASE << up->port.regshift;
-       unsigned int size = 8 << up->port.regshift;
-       struct uart_port *port = &up->port;
-
-       switch (port->iotype) {
-       case UPIO_HUB6:
-       case UPIO_PORT:
-               release_region(port->iobase + offset, size);
-               break;
-       }
-}
-
-static void serial8250_release_port(struct uart_port *port)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-
-       serial8250_release_std_resource(up);
-       if (port->type == PORT_RSA)
-               serial8250_release_rsa_resource(up);
-}
-
-static int serial8250_request_port(struct uart_port *port)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-       int ret;
-
-       if (port->type == PORT_8250_CIR)
-               return -ENODEV;
-
-       ret = serial8250_request_std_resource(up);
-       if (ret == 0 && port->type == PORT_RSA) {
-               ret = serial8250_request_rsa_resource(up);
-               if (ret < 0)
-                       serial8250_release_std_resource(up);
-       }
-
-       return ret;
-}
-
-static void serial8250_config_port(struct uart_port *port, int flags)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-       int probeflags = PROBE_ANY;
-       int ret;
-
-       if (port->type == PORT_8250_CIR)
-               return;
-
-       /*
-        * Find the region that we can probe for.  This in turn
-        * tells us whether we can probe for the type of port.
-        */
-       ret = serial8250_request_std_resource(up);
-       if (ret < 0)
-               return;
-
-       ret = serial8250_request_rsa_resource(up);
-       if (ret < 0)
-               probeflags &= ~PROBE_RSA;
-
-       if (port->iotype != up->cur_iotype)
-               set_io_from_upio(port);
-
-       if (flags & UART_CONFIG_TYPE)
-               autoconfig(up, probeflags);
-
-       /* if access method is AU, it is a 16550 with a quirk */
-       if (port->type == PORT_16550A && port->iotype == UPIO_AU)
-               up->bugs |= UART_BUG_NOMSR;
-
-       if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
-               autoconfig_irq(up);
-
-       if (port->type != PORT_RSA && probeflags & PROBE_RSA)
-               serial8250_release_rsa_resource(up);
-       if (port->type == PORT_UNKNOWN)
-               serial8250_release_std_resource(up);
-
-       /* Fixme: probably not the best place for this */
-       if ((port->type == PORT_XR17V35X) ||
-          (port->type == PORT_XR17D15X))
-               port->handle_irq = exar_handle_irq;
-}
-
-static int
-serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
-       if (ser->irq >= nr_irqs || ser->irq < 0 ||
-           ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
-           ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
-           ser->type == PORT_STARTECH)
-               return -EINVAL;
-       return 0;
-}
-
-static const char *
-serial8250_type(struct uart_port *port)
-{
-       int type = port->type;
-
-       if (type >= ARRAY_SIZE(uart_config))
-               type = 0;
-       return uart_config[type].name;
-}
-
-static struct uart_ops serial8250_pops = {
-       .tx_empty       = serial8250_tx_empty,
-       .set_mctrl      = serial8250_set_mctrl,
-       .get_mctrl      = serial8250_get_mctrl,
-       .stop_tx        = serial8250_stop_tx,
-       .start_tx       = serial8250_start_tx,
-       .stop_rx        = serial8250_stop_rx,
-       .enable_ms      = serial8250_enable_ms,
-       .break_ctl      = serial8250_break_ctl,
-       .startup        = serial8250_startup,
-       .shutdown       = serial8250_shutdown,
-       .set_termios    = serial8250_set_termios,
-       .set_ldisc      = serial8250_set_ldisc,
-       .pm             = serial8250_pm,
-       .type           = serial8250_type,
-       .release_port   = serial8250_release_port,
-       .request_port   = serial8250_request_port,
-       .config_port    = serial8250_config_port,
-       .verify_port    = serial8250_verify_port,
-#ifdef CONFIG_CONSOLE_POLL
-       .poll_get_char = serial8250_get_poll_char,
-       .poll_put_char = serial8250_put_poll_char,
-#endif
-};
-
-static struct uart_8250_port serial8250_ports[UART_NR];
-
-static void (*serial8250_isa_config)(int port, struct uart_port *up,
-       unsigned short *capabilities);
-
-void serial8250_set_isa_configurator(
-       void (*v)(int port, struct uart_port *up, unsigned short *capabilities))
-{
-       serial8250_isa_config = v;
-}
-EXPORT_SYMBOL(serial8250_set_isa_configurator);
-
-static void __init serial8250_isa_init_ports(void)
-{
-       struct uart_8250_port *up;
-       static int first = 1;
-       int i, irqflag = 0;
-
-       if (!first)
-               return;
-       first = 0;
-
-       if (nr_uarts > UART_NR)
-               nr_uarts = UART_NR;
-
-       for (i = 0; i < nr_uarts; i++) {
-               struct uart_8250_port *up = &serial8250_ports[i];
-               struct uart_port *port = &up->port;
-
-               port->line = i;
-               spin_lock_init(&port->lock);
-
-               init_timer(&up->timer);
-               up->timer.function = serial8250_timeout;
-               up->cur_iotype = 0xFF;
-
-               /*
-                * ALPHA_KLUDGE_MCR needs to be killed.
-                */
-               up->mcr_mask = ~ALPHA_KLUDGE_MCR;
-               up->mcr_force = ALPHA_KLUDGE_MCR;
-
-               port->ops = &serial8250_pops;
-       }
-
-       if (share_irqs)
-               irqflag = IRQF_SHARED;
-
-       for (i = 0, up = serial8250_ports;
-            i < ARRAY_SIZE(old_serial_port) && i < nr_uarts;
-            i++, up++) {
-               struct uart_port *port = &up->port;
-
-               port->iobase   = old_serial_port[i].port;
-               port->irq      = irq_canonicalize(old_serial_port[i].irq);
-               port->irqflags = old_serial_port[i].irqflags;
-               port->uartclk  = old_serial_port[i].baud_base * 16;
-               port->flags    = old_serial_port[i].flags;
-               port->hub6     = old_serial_port[i].hub6;
-               port->membase  = old_serial_port[i].iomem_base;
-               port->iotype   = old_serial_port[i].io_type;
-               port->regshift = old_serial_port[i].iomem_reg_shift;
-               set_io_from_upio(port);
-               port->irqflags |= irqflag;
-               if (serial8250_isa_config != NULL)
-                       serial8250_isa_config(i, &up->port, &up->capabilities);
-
-       }
-}
-
-static void
-serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
-{
-       up->port.type = type;
-       if (!up->port.fifosize)
-               up->port.fifosize = uart_config[type].fifo_size;
-       if (!up->tx_loadsz)
-               up->tx_loadsz = uart_config[type].tx_loadsz;
-       if (!up->capabilities)
-               up->capabilities = uart_config[type].flags;
-}
-
-static void __init
-serial8250_register_ports(struct uart_driver *drv, struct device *dev)
-{
-       int i;
-
-       for (i = 0; i < nr_uarts; i++) {
-               struct uart_8250_port *up = &serial8250_ports[i];
-
-               if (up->port.dev)
-                       continue;
-
-               up->port.dev = dev;
-
-               if (up->port.flags & UPF_FIXED_TYPE)
-                       serial8250_init_fixed_type_port(up, up->port.type);
-
-               uart_add_one_port(drv, &up->port);
-       }
-}
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-
-static void serial8250_console_putchar(struct uart_port *port, int ch)
-{
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
-
-       wait_for_xmitr(up, UART_LSR_THRE);
-       serial_port_out(port, UART_TX, ch);
-}
-
-/*
- *     Print a string to the serial port trying not to disturb
- *     any possible real use of the port...
- *
- *     The console_lock must be held when we get here.
- */
-static void
-serial8250_console_write(struct console *co, const char *s, unsigned int count)
-{
-       struct uart_8250_port *up = &serial8250_ports[co->index];
-       struct uart_port *port = &up->port;
-       unsigned long flags;
-       unsigned int ier;
-       int locked = 1;
-
-       touch_nmi_watchdog();
-
-       local_irq_save(flags);
-       if (port->sysrq) {
-               /* serial8250_handle_irq() already took the lock */
-               locked = 0;
-       } else if (oops_in_progress) {
-               locked = spin_trylock(&port->lock);
-       } else
-               spin_lock(&port->lock);
-
-       /*
-        *      First save the IER then disable the interrupts
-        */
-       ier = serial_port_in(port, UART_IER);
-
-       if (up->capabilities & UART_CAP_UUE)
-               serial_port_out(port, UART_IER, UART_IER_UUE);
-       else
-               serial_port_out(port, UART_IER, 0);
-
-       uart_console_write(port, s, count, serial8250_console_putchar);
-
-       /*
-        *      Finally, wait for transmitter to become empty
-        *      and restore the IER
-        */
-       wait_for_xmitr(up, BOTH_EMPTY);
-       serial_port_out(port, UART_IER, ier);
-
-       /*
-        *      The receive handling will happen properly because the
-        *      receive ready bit will still be set; it is not cleared
-        *      on read.  However, the modem status handling will not, so
-        *      we must call it ourselves if we saved anything in the saved
-        *      flags while processing with interrupts off.
-        */
-       if (up->msr_saved_flags)
-               serial8250_modem_status(up);
-
-       if (locked)
-               spin_unlock(&port->lock);
-       local_irq_restore(flags);
-}
-
-static int __init serial8250_console_setup(struct console *co, char *options)
-{
-       struct uart_port *port;
-       int baud = 9600;
-       int bits = 8;
-       int parity = 'n';
-       int flow = 'n';
-
-       /*
-        * Check whether an invalid uart number has been specified, and
-        * if so, search for the first available port that does have
-        * console support.
-        */
-       if (co->index >= nr_uarts)
-               co->index = 0;
-       port = &serial8250_ports[co->index].port;
-       if (!port->iobase && !port->membase)
-               return -ENODEV;
-
-       if (options)
-               uart_parse_options(options, &baud, &parity, &bits, &flow);
-
-       return uart_set_options(port, co, baud, parity, bits, flow);
-}
-
-static int serial8250_console_early_setup(void)
-{
-       return serial8250_find_port_for_earlycon();
-}
-
-static struct console serial8250_console = {
-       .name           = "ttyS",
-       .write          = serial8250_console_write,
-       .device         = uart_console_device,
-       .setup          = serial8250_console_setup,
-       .early_setup    = serial8250_console_early_setup,
-       .flags          = CON_PRINTBUFFER | CON_ANYTIME,
-       .index          = -1,
-       .data           = &serial8250_reg,
-};
-
-static int __init serial8250_console_init(void)
-{
-       serial8250_isa_init_ports();
-       register_console(&serial8250_console);
-       return 0;
-}
-console_initcall(serial8250_console_init);
-
-int serial8250_find_port(struct uart_port *p)
-{
-       int line;
-       struct uart_port *port;
-
-       for (line = 0; line < nr_uarts; line++) {
-               port = &serial8250_ports[line].port;
-               if (uart_match_port(p, port))
-                       return line;
-       }
-       return -ENODEV;
-}
-
-#define SERIAL8250_CONSOLE     &serial8250_console
-#else
-#define SERIAL8250_CONSOLE     NULL
-#endif
-
-static struct uart_driver serial8250_reg = {
-       .owner                  = THIS_MODULE,
-       .driver_name            = "serial",
-       .dev_name               = "ttyS",
-       .major                  = TTY_MAJOR,
-       .minor                  = 64,
-       .cons                   = SERIAL8250_CONSOLE,
-};
-
-/*
- * early_serial_setup - early registration for 8250 ports
- *
- * Setup an 8250 port structure prior to console initialisation.  Use
- * after console initialisation will cause undefined behaviour.
- */
-int __init early_serial_setup(struct uart_port *port)
-{
-       struct uart_port *p;
-
-       if (port->line >= ARRAY_SIZE(serial8250_ports))
-               return -ENODEV;
-
-       serial8250_isa_init_ports();
-       p = &serial8250_ports[port->line].port;
-       p->iobase       = port->iobase;
-       p->membase      = port->membase;
-       p->irq          = port->irq;
-       p->irqflags     = port->irqflags;
-       p->uartclk      = port->uartclk;
-       p->fifosize     = port->fifosize;
-       p->regshift     = port->regshift;
-       p->iotype       = port->iotype;
-       p->flags        = port->flags;
-       p->mapbase      = port->mapbase;
-       p->private_data = port->private_data;
-       p->type         = port->type;
-       p->line         = port->line;
-
-       set_io_from_upio(p);
-       if (port->serial_in)
-               p->serial_in = port->serial_in;
-       if (port->serial_out)
-               p->serial_out = port->serial_out;
-       if (port->handle_irq)
-               p->handle_irq = port->handle_irq;
-       else
-               p->handle_irq = serial8250_default_handle_irq;
-
-       return 0;
-}
-
-/**
- *     serial8250_suspend_port - suspend one serial port
- *     @line:  serial line number
- *
- *     Suspend one serial port.
- */
-void serial8250_suspend_port(int line)
-{
-       uart_suspend_port(&serial8250_reg, &serial8250_ports[line].port);
-}
-
-/**
- *     serial8250_resume_port - resume one serial port
- *     @line:  serial line number
- *
- *     Resume one serial port.
- */
-void serial8250_resume_port(int line)
-{
-       struct uart_8250_port *up = &serial8250_ports[line];
-       struct uart_port *port = &up->port;
-
-       if (up->capabilities & UART_NATSEMI) {
-               /* Ensure it's still in high speed mode */
-               serial_port_out(port, UART_LCR, 0xE0);
-
-               ns16550a_goto_highspeed(up);
-
-               serial_port_out(port, UART_LCR, 0);
-               port->uartclk = 921600*16;
-       }
-       uart_resume_port(&serial8250_reg, port);
-}
-
-/*
- * Register a set of serial devices attached to a platform device.  The
- * list is terminated with a zero flags entry, which means we expect
- * all entries to have at least UPF_BOOT_AUTOCONF set.
- */
-static int serial8250_probe(struct platform_device *dev)
-{
-       struct plat_serial8250_port *p = dev->dev.platform_data;
-       struct uart_8250_port uart;
-       int ret, i, irqflag = 0;
-
-       memset(&uart, 0, sizeof(uart));
-
-       if (share_irqs)
-               irqflag = IRQF_SHARED;
-
-       for (i = 0; p && p->flags != 0; p++, i++) {
-               uart.port.iobase        = p->iobase;
-               uart.port.membase       = p->membase;
-               uart.port.irq           = p->irq;
-               uart.port.irqflags      = p->irqflags;
-               uart.port.uartclk       = p->uartclk;
-               uart.port.regshift      = p->regshift;
-               uart.port.iotype        = p->iotype;
-               uart.port.flags         = p->flags;
-               uart.port.mapbase       = p->mapbase;
-               uart.port.hub6          = p->hub6;
-               uart.port.private_data  = p->private_data;
-               uart.port.type          = p->type;
-               uart.port.serial_in     = p->serial_in;
-               uart.port.serial_out    = p->serial_out;
-               uart.port.handle_irq    = p->handle_irq;
-               uart.port.handle_break  = p->handle_break;
-               uart.port.set_termios   = p->set_termios;
-               uart.port.pm            = p->pm;
-               uart.port.dev           = &dev->dev;
-               uart.port.irqflags      |= irqflag;
-               ret = serial8250_register_8250_port(&uart);
-               if (ret < 0) {
-                       dev_err(&dev->dev, "unable to register port at index %d "
-                               "(IO%lx MEM%llx IRQ%d): %d\n", i,
-                               p->iobase, (unsigned long long)p->mapbase,
-                               p->irq, ret);
-               }
-       }
-       return 0;
-}
-
-/*
- * Remove serial ports registered against a platform device.
- */
-static int serial8250_remove(struct platform_device *dev)
-{
-       int i;
-
-       for (i = 0; i < nr_uarts; i++) {
-               struct uart_8250_port *up = &serial8250_ports[i];
-
-               if (up->port.dev == &dev->dev)
-                       serial8250_unregister_port(i);
-       }
-       return 0;
-}
-
-static int serial8250_suspend(struct platform_device *dev, pm_message_t state)
-{
-       int i;
-
-       for (i = 0; i < UART_NR; i++) {
-               struct uart_8250_port *up = &serial8250_ports[i];
-
-               if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
-                       uart_suspend_port(&serial8250_reg, &up->port);
-       }
-
-       return 0;
-}
-
-static int serial8250_resume(struct platform_device *dev)
-{
-       int i;
-
-       for (i = 0; i < UART_NR; i++) {
-               struct uart_8250_port *up = &serial8250_ports[i];
-
-               if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
-                       serial8250_resume_port(i);
-       }
-
-       return 0;
-}
-
-static struct platform_driver serial8250_isa_driver = {
-       .probe          = serial8250_probe,
-       .remove         = serial8250_remove,
-       .suspend        = serial8250_suspend,
-       .resume         = serial8250_resume,
-       .driver         = {
-               .name   = "serial8250",
-               .owner  = THIS_MODULE,
-       },
-};
-
-/*
- * This "device" covers _all_ ISA 8250-compatible serial devices listed
- * in the table in include/asm/serial.h
- */
-static struct platform_device *serial8250_isa_devs;
-
-/*
- * serial8250_register_8250_port and serial8250_unregister_port allow for
- * 16x50 serial ports to be configured at run-time, to support PCMCIA
- * modems and PCI multiport cards.
- */
-static DEFINE_MUTEX(serial_mutex);
-
-static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *port)
-{
-       int i;
-
-       /*
-        * First, find a port entry which matches.
-        */
-       for (i = 0; i < nr_uarts; i++)
-               if (uart_match_port(&serial8250_ports[i].port, port))
-                       return &serial8250_ports[i];
-
-       /*
-        * We didn't find a matching entry, so look for the first
-        * free entry.  We look for one which hasn't been previously
-        * used (indicated by zero iobase).
-        */
-       for (i = 0; i < nr_uarts; i++)
-               if (serial8250_ports[i].port.type == PORT_UNKNOWN &&
-                   serial8250_ports[i].port.iobase == 0)
-                       return &serial8250_ports[i];
-
-       /*
-        * That also failed.  Last resort is to find any entry which
-        * doesn't have a real port associated with it.
-        */
-       for (i = 0; i < nr_uarts; i++)
-               if (serial8250_ports[i].port.type == PORT_UNKNOWN)
-                       return &serial8250_ports[i];
-
-       return NULL;
-}
-
-/**
- *     serial8250_register_8250_port - register a serial port
- *     @up: serial port template
- *
- *     Configure the serial port specified by the request. If the
- *     port exists and is in use, it is hung up and unregistered
- *     first.
- *
- *     The port is then probed and, if necessary, the IRQ is autodetected.
- *     If this fails, an error is returned.
- *
- *     On success the port is ready to use and the line number is returned.
- */
-int serial8250_register_8250_port(struct uart_8250_port *up)
-{
-       struct uart_8250_port *uart;
-       int ret = -ENOSPC;
-
-       if (up->port.uartclk == 0)
-               return -EINVAL;
-
-       mutex_lock(&serial_mutex);
-
-       uart = serial8250_find_match_or_unused(&up->port);
-       if (uart && uart->port.type != PORT_8250_CIR) {
-               if (uart->port.dev)
-                       uart_remove_one_port(&serial8250_reg, &uart->port);
-
-               uart->port.iobase       = up->port.iobase;
-               uart->port.membase      = up->port.membase;
-               uart->port.irq          = up->port.irq;
-               uart->port.irqflags     = up->port.irqflags;
-               uart->port.uartclk      = up->port.uartclk;
-               uart->port.fifosize     = up->port.fifosize;
-               uart->port.regshift     = up->port.regshift;
-               uart->port.iotype       = up->port.iotype;
-               uart->port.flags        = up->port.flags | UPF_BOOT_AUTOCONF;
-               uart->bugs              = up->bugs;
-               uart->port.mapbase      = up->port.mapbase;
-               uart->port.private_data = up->port.private_data;
-               uart->port.fifosize     = up->port.fifosize;
-               uart->tx_loadsz         = up->tx_loadsz;
-               uart->capabilities      = up->capabilities;
-
-               if (up->port.dev)
-                       uart->port.dev = up->port.dev;
-
-               if (up->port.flags & UPF_FIXED_TYPE)
-                       serial8250_init_fixed_type_port(uart, up->port.type);
-
-               set_io_from_upio(&uart->port);
-               /* Possibly override default I/O functions.  */
-               if (up->port.serial_in)
-                       uart->port.serial_in = up->port.serial_in;
-               if (up->port.serial_out)
-                       uart->port.serial_out = up->port.serial_out;
-               if (up->port.handle_irq)
-                       uart->port.handle_irq = up->port.handle_irq;
-               /*  Possibly override set_termios call */
-               if (up->port.set_termios)
-                       uart->port.set_termios = up->port.set_termios;
-               if (up->port.pm)
-                       uart->port.pm = up->port.pm;
-               if (up->port.handle_break)
-                       uart->port.handle_break = up->port.handle_break;
-               if (up->dl_read)
-                       uart->dl_read = up->dl_read;
-               if (up->dl_write)
-                       uart->dl_write = up->dl_write;
-               if (up->dma)
-                       uart->dma = up->dma;
-
-               if (serial8250_isa_config != NULL)
-                       serial8250_isa_config(0, &uart->port,
-                                       &uart->capabilities);
-
-               ret = uart_add_one_port(&serial8250_reg, &uart->port);
-               if (ret == 0)
-                       ret = uart->port.line;
-       }
-       mutex_unlock(&serial_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL(serial8250_register_8250_port);
-
-/**
- *     serial8250_unregister_port - remove a 16x50 serial port at runtime
- *     @line: serial line number
- *
- *     Remove one serial port.  This may not be called from interrupt
- *     context.  We hand the port back to our control.
- */
-void serial8250_unregister_port(int line)
-{
-       struct uart_8250_port *uart = &serial8250_ports[line];
-
-       mutex_lock(&serial_mutex);
-       uart_remove_one_port(&serial8250_reg, &uart->port);
-       if (serial8250_isa_devs) {
-               uart->port.flags &= ~UPF_BOOT_AUTOCONF;
-               uart->port.type = PORT_UNKNOWN;
-               uart->port.dev = &serial8250_isa_devs->dev;
-               uart->capabilities = uart_config[uart->port.type].flags;
-               uart_add_one_port(&serial8250_reg, &uart->port);
-       } else {
-               uart->port.dev = NULL;
-       }
-       mutex_unlock(&serial_mutex);
-}
-EXPORT_SYMBOL(serial8250_unregister_port);
-
-static int __init serial8250_init(void)
-{
-       int ret;
-
-       serial8250_isa_init_ports();
-
-       printk(KERN_INFO "Serial: 8250/16550 driver, "
-               "%d ports, IRQ sharing %sabled\n", nr_uarts,
-               share_irqs ? "en" : "dis");
-
-#ifdef CONFIG_SPARC
-       ret = sunserial_register_minors(&serial8250_reg, UART_NR);
-#else
-       serial8250_reg.nr = UART_NR;
-       ret = uart_register_driver(&serial8250_reg);
-#endif
-       if (ret)
-               goto out;
-
-       ret = serial8250_pnp_init();
-       if (ret)
-               goto unreg_uart_drv;
-
-       serial8250_isa_devs = platform_device_alloc("serial8250",
-                                                   PLAT8250_DEV_LEGACY);
-       if (!serial8250_isa_devs) {
-               ret = -ENOMEM;
-               goto unreg_pnp;
-       }
-
-       ret = platform_device_add(serial8250_isa_devs);
-       if (ret)
-               goto put_dev;
-
-       serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);
-
-       ret = platform_driver_register(&serial8250_isa_driver);
-       if (ret == 0)
-               goto out;
-
-       platform_device_del(serial8250_isa_devs);
-put_dev:
-       platform_device_put(serial8250_isa_devs);
-unreg_pnp:
-       serial8250_pnp_exit();
-unreg_uart_drv:
-#ifdef CONFIG_SPARC
-       sunserial_unregister_minors(&serial8250_reg, UART_NR);
-#else
-       uart_unregister_driver(&serial8250_reg);
-#endif
-out:
-       return ret;
-}
-
-static void __exit serial8250_exit(void)
-{
-       struct platform_device *isa_dev = serial8250_isa_devs;
-
-       /*
-        * This tells serial8250_unregister_port() not to re-register
-        * the ports (thereby making serial8250_isa_driver permanently
-        * in use).
-        */
-       serial8250_isa_devs = NULL;
-
-       platform_driver_unregister(&serial8250_isa_driver);
-       platform_device_unregister(isa_dev);
-
-       serial8250_pnp_exit();
-
-#ifdef CONFIG_SPARC
-       sunserial_unregister_minors(&serial8250_reg, UART_NR);
-#else
-       uart_unregister_driver(&serial8250_reg);
-#endif
-}
-
-module_init(serial8250_init);
-module_exit(serial8250_exit);
-
-EXPORT_SYMBOL(serial8250_suspend_port);
-EXPORT_SYMBOL(serial8250_resume_port);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Generic 8250/16x50 serial driver");
-
-module_param(share_irqs, uint, 0644);
-MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
-       " (unsafe)");
-
-module_param(nr_uarts, uint, 0644);
-MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
-
-module_param(skip_txen_test, uint, 0644);
-MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
-
-#ifdef CONFIG_SERIAL_8250_RSA
-module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
-MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
-#endif
-MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
-
-#ifndef MODULE
-/* This module was renamed to 8250_core in 3.7.  Keep the old "8250" name
- * working as well for the module options so we don't break people.  We
- * need to keep the option names identical, but the convenience macros
- * refuse to let us do that: the build fails with redefinition errors for
- * the global variables.  So we stick them inside a dummy function to avoid
- * those conflicts.  The options still get parsed, and the redefined
- * MODULE_PARAM_PREFIX lets us keep the "8250." syntax alive.
- *
- * This is hacky.  I'm sorry.
- */
-static void __used s8250_options(void)
-{
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "8250."
-
-       module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644);
-       module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644);
-       module_param_cb(skip_txen_test, &param_ops_uint, &skip_txen_test, 0644);
-#ifdef CONFIG_SERIAL_8250_RSA
-       __module_param_call(MODULE_PARAM_PREFIX, probe_rsa,
-               &param_array_ops, .arr = &__param_arr_probe_rsa,
-               0444, -1);
-#endif
-}
-#else
-MODULE_ALIAS("8250");
-#endif
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
new file mode 100644 (file)
index 0000000..35f9c96
--- /dev/null
@@ -0,0 +1,3450 @@
+/*
+ *  Driver for 8250/16550-type serial ports
+ *
+ *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ *  Copyright (C) 2001 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * A note about mapbase / membase
+ *
+ *  mapbase is the physical address of the IO port.
+ *  membase is an 'ioremapped' cookie.
+ */
+
+#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/ratelimit.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/nmi.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#ifdef CONFIG_SPARC
+#include <linux/sunserialcore.h>
+#endif
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "8250.h"
+
+/*
+ * Configuration:
+ *   share_irqs - whether we pass IRQF_SHARED to request_irq().  This option
+ *                is unsafe when used on edge-triggered interrupts.
+ */
+static unsigned int share_irqs = SERIAL8250_SHARE_IRQS;
+
+static unsigned int nr_uarts = CONFIG_SERIAL_8250_RUNTIME_UARTS;
+
+static struct uart_driver serial8250_reg;
+
+static int serial_index(struct uart_port *port)
+{
+       return (serial8250_reg.minor - 64) + port->line;
+}
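/*
 * A minimal standalone sketch of the relationship serial_index() relies
 * on: the driver registers ttyS devices starting at minor 64 on TTY_MAJOR
 * (4), so line N maps to minor 64 + N and the index is simply the offset
 * from the first minor.  Values below are illustrative only.
 */
#include <stdio.h>

int main(void)
{
        unsigned int minor_base = 64;   /* first ttyS minor */
        unsigned int line = 2;          /* third port */

        printf("ttyS%u -> char device %u:%u\n",
               (minor_base - 64) + line, 4 /* TTY_MAJOR */, minor_base + line);
        return 0;
}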
+
+static unsigned int skip_txen_test; /* force skip of txen test at init time */
+
+/*
+ * Debugging.
+ */
+#if 0
+#define DEBUG_AUTOCONF(fmt...) printk(fmt)
+#else
+#define DEBUG_AUTOCONF(fmt...) do { } while (0)
+#endif
+
+#if 0
+#define DEBUG_INTR(fmt...)     printk(fmt)
+#else
+#define DEBUG_INTR(fmt...)     do { } while (0)
+#endif
+
+#define PASS_LIMIT     512
+
+#define BOTH_EMPTY     (UART_LSR_TEMT | UART_LSR_THRE)
+
+
+#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
+#define CONFIG_SERIAL_DETECT_IRQ 1
+#endif
+#ifdef CONFIG_SERIAL_8250_MANY_PORTS
+#define CONFIG_SERIAL_MANY_PORTS 1
+#endif
+
+/*
+ * HUB6 is always on.  This will be removed once the header
+ * files have been cleaned.
+ */
+#define CONFIG_HUB6 1
+
+#include <asm/serial.h>
+/*
+ * SERIAL_PORT_DFNS tells us about built-in ports that have no
+ * standard enumeration mechanism.   Platforms that can find all
+ * serial ports via mechanisms like ACPI or PCI need not supply it.
+ */
+#ifndef SERIAL_PORT_DFNS
+#define SERIAL_PORT_DFNS
+#endif
+
+static const struct old_serial_port old_serial_port[] = {
+       SERIAL_PORT_DFNS /* defined in asm/serial.h */
+};
+
+#define UART_NR        CONFIG_SERIAL_8250_NR_UARTS
+
+#ifdef CONFIG_SERIAL_8250_RSA
+
+#define PORT_RSA_MAX 4
+static unsigned long probe_rsa[PORT_RSA_MAX];
+static unsigned int probe_rsa_count;
+#endif /* CONFIG_SERIAL_8250_RSA  */
+
+struct irq_info {
+       struct                  hlist_node node;
+       int                     irq;
+       spinlock_t              lock;   /* Protects the list, not the hash */
+       struct list_head        *head;
+};
+
+#define NR_IRQ_HASH            32      /* Can be adjusted later */
+static struct hlist_head irq_lists[NR_IRQ_HASH];
+static DEFINE_MUTEX(hash_mutex);       /* Used to walk the hash */
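/*
 * A minimal standalone sketch of how ports sharing an IRQ can be grouped
 * into irq_lists[]-style buckets, assuming a simple modulo hash over the
 * IRQ number (NR_IRQ_HASH buckets); the hash choice and names here are
 * illustrative, not necessarily the driver's exact scheme.
 */
#include <stdio.h>

#define EX_NR_IRQ_HASH 32

static unsigned int example_irq_bucket(int irq)
{
        return (unsigned int)irq % EX_NR_IRQ_HASH;
}

int main(void)
{
        printf("IRQ 4  -> bucket %u\n", example_irq_bucket(4));
        printf("IRQ 36 -> bucket %u\n", example_irq_bucket(36));
        return 0;
}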
+
+/*
+ * Here we define the default xmit fifo size used for each type of UART.
+ */
+static const struct serial8250_config uart_config[] = {
+       [PORT_UNKNOWN] = {
+               .name           = "unknown",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_8250] = {
+               .name           = "8250",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_16450] = {
+               .name           = "16450",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_16550] = {
+               .name           = "16550",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_16550A] = {
+               .name           = "16550A",
+               .fifo_size      = 16,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO,
+       },
+       [PORT_CIRRUS] = {
+               .name           = "Cirrus",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_16650] = {
+               .name           = "ST16650",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+       },
+       [PORT_16650V2] = {
+               .name           = "ST16650V2",
+               .fifo_size      = 32,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+                                 UART_FCR_T_TRIG_00,
+               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+       },
+       [PORT_16750] = {
+               .name           = "TI16750",
+               .fifo_size      = 64,
+               .tx_loadsz      = 64,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+                                 UART_FCR7_64BYTE,
+               .flags          = UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE,
+       },
+       [PORT_STARTECH] = {
+               .name           = "Startech",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_16C950] = {
+               .name           = "16C950/954",
+               .fifo_size      = 128,
+               .tx_loadsz      = 128,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               /* UART_CAP_EFR breaks the Billionton CF Bluetooth card. */
+               .flags          = UART_CAP_FIFO | UART_CAP_SLEEP,
+       },
+       [PORT_16654] = {
+               .name           = "ST16654",
+               .fifo_size      = 64,
+               .tx_loadsz      = 32,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+                                 UART_FCR_T_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+       },
+       [PORT_16850] = {
+               .name           = "XR16850",
+               .fifo_size      = 128,
+               .tx_loadsz      = 128,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+       },
+       [PORT_RSA] = {
+               .name           = "RSA",
+               .fifo_size      = 2048,
+               .tx_loadsz      = 2048,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11,
+               .flags          = UART_CAP_FIFO,
+       },
+       [PORT_NS16550A] = {
+               .name           = "NS16550A",
+               .fifo_size      = 16,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_NATSEMI,
+       },
+       [PORT_XSCALE] = {
+               .name           = "XScale",
+               .fifo_size      = 32,
+               .tx_loadsz      = 32,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
+       },
+       [PORT_OCTEON] = {
+               .name           = "OCTEON",
+               .fifo_size      = 64,
+               .tx_loadsz      = 64,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO,
+       },
+       [PORT_AR7] = {
+               .name           = "AR7",
+               .fifo_size      = 16,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
+               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
+       },
+       [PORT_U6_16550A] = {
+               .name           = "U6_16550A",
+               .fifo_size      = 64,
+               .tx_loadsz      = 64,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
+       },
+       [PORT_TEGRA] = {
+               .name           = "Tegra",
+               .fifo_size      = 32,
+               .tx_loadsz      = 8,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+                                 UART_FCR_T_TRIG_01,
+               .flags          = UART_CAP_FIFO | UART_CAP_RTOIE,
+       },
+       [PORT_XR17D15X] = {
+               .name           = "XR17D15X",
+               .fifo_size      = 64,
+               .tx_loadsz      = 64,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
+                                 UART_CAP_SLEEP,
+       },
+       [PORT_XR17V35X] = {
+               .name           = "XR17V35X",
+               .fifo_size      = 256,
+               .tx_loadsz      = 256,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11 |
+                                 UART_FCR_T_TRIG_11,
+               .flags          = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
+                                 UART_CAP_SLEEP,
+       },
+       [PORT_LPC3220] = {
+               .name           = "LPC3220",
+               .fifo_size      = 64,
+               .tx_loadsz      = 32,
+               .fcr            = UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO |
+                                 UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
+               .flags          = UART_CAP_FIFO,
+       },
+       [PORT_BRCM_TRUMANAGE] = {
+               .name           = "TruManage",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1024,
+               .flags          = UART_CAP_HFIFO,
+       },
+       [PORT_8250_CIR] = {
+               .name           = "CIR port"
+       },
+       [PORT_ALTR_16550_F32] = {
+               .name           = "Altera 16550 FIFO32",
+               .fifo_size      = 32,
+               .tx_loadsz      = 32,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
+       },
+       [PORT_ALTR_16550_F64] = {
+               .name           = "Altera 16550 FIFO64",
+               .fifo_size      = 64,
+               .tx_loadsz      = 64,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
+       },
+       [PORT_ALTR_16550_F128] = {
+               .name           = "Altera 16550 FIFO128",
+               .fifo_size      = 128,
+               .tx_loadsz      = 128,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
+       },
+};
+
+/* UART divisor latch read */
+static int default_serial_dl_read(struct uart_8250_port *up)
+{
+       return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
+}
+
+/* UART divisor latch write */
+static void default_serial_dl_write(struct uart_8250_port *up, int value)
+{
+       serial_out(up, UART_DLL, value & 0xff);
+       serial_out(up, UART_DLM, value >> 8 & 0xff);
+}
+
+#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
+
+/* Au1x00/RT288x UART hardware has a weird register layout */
+static const u8 au_io_in_map[] = {
+       [UART_RX]  = 0,
+       [UART_IER] = 2,
+       [UART_IIR] = 3,
+       [UART_LCR] = 5,
+       [UART_MCR] = 6,
+       [UART_LSR] = 7,
+       [UART_MSR] = 8,
+};
+
+static const u8 au_io_out_map[] = {
+       [UART_TX]  = 1,
+       [UART_IER] = 2,
+       [UART_FCR] = 4,
+       [UART_LCR] = 5,
+       [UART_MCR] = 6,
+};
+
+static unsigned int au_serial_in(struct uart_port *p, int offset)
+{
+       offset = au_io_in_map[offset] << p->regshift;
+       return __raw_readl(p->membase + offset);
+}
+
+static void au_serial_out(struct uart_port *p, int offset, int value)
+{
+       offset = au_io_out_map[offset] << p->regshift;
+       __raw_writel(value, p->membase + offset);
+}
+
+/* The Au1x00 doesn't have a standard divisor latch */
+static int au_serial_dl_read(struct uart_8250_port *up)
+{
+       return __raw_readl(up->port.membase + 0x28);
+}
+
+static void au_serial_dl_write(struct uart_8250_port *up, int value)
+{
+       __raw_writel(value, up->port.membase + 0x28);
+}
+
+#endif
+
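+/*
+ * Hub6 cards use an index/data pair: write the register selector to the
+ * base port, then access the data at iobase + 1.
+ */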
+static unsigned int hub6_serial_in(struct uart_port *p, int offset)
+{
+       offset = offset << p->regshift;
+       outb(p->hub6 - 1 + offset, p->iobase);
+       return inb(p->iobase + 1);
+}
+
+static void hub6_serial_out(struct uart_port *p, int offset, int value)
+{
+       offset = offset << p->regshift;
+       outb(p->hub6 - 1 + offset, p->iobase);
+       outb(value, p->iobase + 1);
+}
+
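+/*
+ * Byte-wide, 32-bit and port I/O register accessors; the register offset
+ * is scaled by the port's regshift in each case.
+ */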
+static unsigned int mem_serial_in(struct uart_port *p, int offset)
+{
+       offset = offset << p->regshift;
+       return readb(p->membase + offset);
+}
+
+static void mem_serial_out(struct uart_port *p, int offset, int value)
+{
+       offset = offset << p->regshift;
+       writeb(value, p->membase + offset);
+}
+
+static void mem32_serial_out(struct uart_port *p, int offset, int value)
+{
+       offset = offset << p->regshift;
+       writel(value, p->membase + offset);
+}
+
+static unsigned int mem32_serial_in(struct uart_port *p, int offset)
+{
+       offset = offset << p->regshift;
+       return readl(p->membase + offset);
+}
+
+static unsigned int io_serial_in(struct uart_port *p, int offset)
+{
+       offset = offset << p->regshift;
+       return inb(p->iobase + offset);
+}
+
+static void io_serial_out(struct uart_port *p, int offset, int value)
+{
+       offset = offset << p->regshift;
+       outb(value, p->iobase + offset);
+}
+
+static int serial8250_default_handle_irq(struct uart_port *port);
+static int exar_handle_irq(struct uart_port *port);
+
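+/*
+ * Select the register accessors (and divisor latch helpers) that match
+ * the port's iotype, and remember which iotype was loaded.
+ */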
+static void set_io_from_upio(struct uart_port *p)
+{
+       struct uart_8250_port *up =
+               container_of(p, struct uart_8250_port, port);
+
+       up->dl_read = default_serial_dl_read;
+       up->dl_write = default_serial_dl_write;
+
+       switch (p->iotype) {
+       case UPIO_HUB6:
+               p->serial_in = hub6_serial_in;
+               p->serial_out = hub6_serial_out;
+               break;
+
+       case UPIO_MEM:
+               p->serial_in = mem_serial_in;
+               p->serial_out = mem_serial_out;
+               break;
+
+       case UPIO_MEM32:
+               p->serial_in = mem32_serial_in;
+               p->serial_out = mem32_serial_out;
+               break;
+
+#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
+       case UPIO_AU:
+               p->serial_in = au_serial_in;
+               p->serial_out = au_serial_out;
+               up->dl_read = au_serial_dl_read;
+               up->dl_write = au_serial_dl_write;
+               break;
+#endif
+
+       default:
+               p->serial_in = io_serial_in;
+               p->serial_out = io_serial_out;
+               break;
+       }
+       /* Remember loaded iotype */
+       up->cur_iotype = p->iotype;
+       p->handle_irq = serial8250_default_handle_irq;
+}
+
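+/*
+ * Write a register and, on memory-mapped buses, read LCR back so the
+ * posted write has reached the hardware before we continue.
+ */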
+static void
+serial_port_out_sync(struct uart_port *p, int offset, int value)
+{
+       switch (p->iotype) {
+       case UPIO_MEM:
+       case UPIO_MEM32:
+       case UPIO_AU:
+               p->serial_out(p, offset, value);
+               p->serial_in(p, UART_LCR);      /* safe, no side-effects */
+               break;
+       default:
+               p->serial_out(p, offset, value);
+       }
+}
+
+/*
+ * For the 16C950
+ */
+static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
+{
+       serial_out(up, UART_SCR, offset);
+       serial_out(up, UART_ICR, value);
+}
+
+static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
+{
+       unsigned int value;
+
+       serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
+       serial_out(up, UART_SCR, offset);
+       value = serial_in(up, UART_ICR);
+       serial_icr_write(up, UART_ACR, up->acr);
+
+       return value;
+}
+
+/*
+ * FIFO support.
+ */
+static void serial8250_clear_fifos(struct uart_8250_port *p)
+{
+       if (p->capabilities & UART_CAP_FIFO) {
+               serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
+               serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
+                              UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+               serial_out(p, UART_FCR, 0);
+       }
+}
+
+void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
+{
+       unsigned char fcr;
+
+       serial8250_clear_fifos(p);
+       fcr = uart_config[p->port.type].fcr;
+       serial_out(p, UART_FCR, fcr);
+}
+EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos);
+
+/*
+ * IER sleep support.  UARTs which have EFRs need the "extended
+ * capability" bit enabled.  Note that on XR16C850s, we need to
+ * reset LCR to write to IER.
+ */
+static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+{
+       /*
+        * Exar UARTs have a SLEEP register that enables or disables sleep
+        * mode for each UART separately.  On the XR17V35x the register is
+        * accessible to each UART at the UART_EXAR_SLEEP offset, but each
+        * UART channel may only write to its corresponding bit.
+        */
+       if ((p->port.type == PORT_XR17V35X) ||
+          (p->port.type == PORT_XR17D15X)) {
+               serial_out(p, UART_EXAR_SLEEP, 0xff);
+               return;
+       }
+
+       if (p->capabilities & UART_CAP_SLEEP) {
+               if (p->capabilities & UART_CAP_EFR) {
+                       serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
+                       serial_out(p, UART_EFR, UART_EFR_ECB);
+                       serial_out(p, UART_LCR, 0);
+               }
+               serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
+               if (p->capabilities & UART_CAP_EFR) {
+                       serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
+                       serial_out(p, UART_EFR, 0);
+                       serial_out(p, UART_LCR, 0);
+               }
+       }
+}
+
+#ifdef CONFIG_SERIAL_8250_RSA
+/*
+ * Attempts to turn on the RSA FIFO.  Returns zero on failure.
+ * We set the port uart clock rate if we succeed.
+ */
+static int __enable_rsa(struct uart_8250_port *up)
+{
+       unsigned char mode;
+       int result;
+
+       mode = serial_in(up, UART_RSA_MSR);
+       result = mode & UART_RSA_MSR_FIFO;
+
+       if (!result) {
+               serial_out(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
+               mode = serial_in(up, UART_RSA_MSR);
+               result = mode & UART_RSA_MSR_FIFO;
+       }
+
+       if (result)
+               up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16;
+
+       return result;
+}
+
+static void enable_rsa(struct uart_8250_port *up)
+{
+       if (up->port.type == PORT_RSA) {
+               if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
+                       spin_lock_irq(&up->port.lock);
+                       __enable_rsa(up);
+                       spin_unlock_irq(&up->port.lock);
+               }
+               if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
+                       serial_out(up, UART_RSA_FRR, 0);
+       }
+}
+
+/*
+ * Attempts to turn off the RSA FIFO; on success the port uart clock is
+ * dropped back to the low-speed base.  It is unknown why interrupts were
+ * originally disabled here; this function preserves that behaviour by
+ * taking the port spinlock itself.
+ */
+static void disable_rsa(struct uart_8250_port *up)
+{
+       unsigned char mode;
+       int result;
+
+       if (up->port.type == PORT_RSA &&
+           up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
+               spin_lock_irq(&up->port.lock);
+
+               mode = serial_in(up, UART_RSA_MSR);
+               result = !(mode & UART_RSA_MSR_FIFO);
+
+               if (!result) {
+                       serial_out(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
+                       mode = serial_in(up, UART_RSA_MSR);
+                       result = !(mode & UART_RSA_MSR_FIFO);
+               }
+
+               if (result)
+                       up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
+               spin_unlock_irq(&up->port.lock);
+       }
+}
+#endif /* CONFIG_SERIAL_8250_RSA */
+
+/*
+ * This is a quickie test to see how big the FIFO is.
+ * It doesn't work all the time, more's the pity.
+ */
+static int size_fifo(struct uart_8250_port *up)
+{
+       unsigned char old_fcr, old_mcr, old_lcr;
+       unsigned short old_dl;
+       int count;
+
+       old_lcr = serial_in(up, UART_LCR);
+       serial_out(up, UART_LCR, 0);
+       old_fcr = serial_in(up, UART_FCR);
+       old_mcr = serial_in(up, UART_MCR);
+       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+                   UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+       serial_out(up, UART_MCR, UART_MCR_LOOP);
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+       old_dl = serial_dl_read(up);
+       serial_dl_write(up, 0x0001);
+       serial_out(up, UART_LCR, 0x03);
+       for (count = 0; count < 256; count++)
+               serial_out(up, UART_TX, count);
+       mdelay(20);/* FIXME - schedule_timeout */
+       for (count = 0; (serial_in(up, UART_LSR) & UART_LSR_DR) &&
+            (count < 256); count++)
+               serial_in(up, UART_RX);
+       serial_out(up, UART_FCR, old_fcr);
+       serial_out(up, UART_MCR, old_mcr);
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+       serial_dl_write(up, old_dl);
+       serial_out(up, UART_LCR, old_lcr);
+
+       return count;
+}
+
+/*
+ * Read UART ID using the divisor method - set DLL and DLM to zero
+ * and the revision will be in DLL and device type in DLM.  We
+ * preserve the device state across this.
+ */
+static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
+{
+       unsigned char old_dll, old_dlm, old_lcr;
+       unsigned int id;
+
+       old_lcr = serial_in(p, UART_LCR);
+       serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
+
+       old_dll = serial_in(p, UART_DLL);
+       old_dlm = serial_in(p, UART_DLM);
+
+       serial_out(p, UART_DLL, 0);
+       serial_out(p, UART_DLM, 0);
+
+       id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
+
+       serial_out(p, UART_DLL, old_dll);
+       serial_out(p, UART_DLM, old_dlm);
+       serial_out(p, UART_LCR, old_lcr);
+
+       return id;
+}
+
+/*
+ * This is a helper routine to autodetect StarTech/Exar/Oxsemi UARTs.
+ * When this function is called we know it is at least a StarTech
+ * 16650 V2, but it might be one of several StarTech UARTs, or one of
+ * its clones.  (We treat the broken original StarTech 16650 V1 as a
+ * 16550, and why not?  Startech doesn't seem to even acknowledge its
+ * existence.)
+ *
+ * What evil have men's minds wrought...
+ */
+static void autoconfig_has_efr(struct uart_8250_port *up)
+{
+       unsigned int id1, id2, id3, rev;
+
+       /*
+        * Everything with an EFR has SLEEP
+        */
+       up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
+
+       /*
+        * First we check to see if it's an Oxford Semiconductor UART.
+        *
+        * We have to do this here because some non-National
+        * Semiconductor clone chips lock up if you try writing to the
+        * LSR register (which serial_icr_read does).
+        */
+
+       /*
+        * Check for Oxford Semiconductor 16C950.
+        *
+        * EFR [4] must be set else this test fails.
+        *
+        * This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca)
+        * claims that it's needed for 952 dual UARTs (which are not
+        * recommended for new designs).
+        */
+       up->acr = 0;
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+       serial_out(up, UART_EFR, UART_EFR_ECB);
+       serial_out(up, UART_LCR, 0x00);
+       id1 = serial_icr_read(up, UART_ID1);
+       id2 = serial_icr_read(up, UART_ID2);
+       id3 = serial_icr_read(up, UART_ID3);
+       rev = serial_icr_read(up, UART_REV);
+
+       DEBUG_AUTOCONF("950id=%02x:%02x:%02x:%02x ", id1, id2, id3, rev);
+
+       if (id1 == 0x16 && id2 == 0xC9 &&
+           (id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) {
+               up->port.type = PORT_16C950;
+
+               /*
+                * Enable the workaround for the Oxford Semiconductor 952 rev B
+                * chip which causes it to seriously miscalculate baud rates
+                * when DLL is 0.
+                */
+               if (id3 == 0x52 && rev == 0x01)
+                       up->bugs |= UART_BUG_QUOT;
+               return;
+       }
+
+       /*
+        * We check for a XR16C850 by setting DLL and DLM to 0, and then
+        * reading back DLL and DLM.  The chip type depends on the DLM
+        * value read back:
+        *  0x10 - XR16C850 and the DLL contains the chip revision.
+        *  0x12 - XR16C2850.
+        *  0x14 - XR16C854.
+        */
+       id1 = autoconfig_read_divisor_id(up);
+       DEBUG_AUTOCONF("850id=%04x ", id1);
+
+       id2 = id1 >> 8;
+       if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) {
+               up->port.type = PORT_16850;
+               return;
+       }
+
+       /*
+        * It wasn't an XR16C850.
+        *
+        * We distinguish between the '654 and the '650 by counting
+        * how many bytes are in the FIFO.  I'm using this for now,
+        * since that's the technique that was sent to me in the
+        * serial driver update, but I'm not convinced this works.
+        * I've had problems doing this in the past.  -TYT
+        */
+       if (size_fifo(up) == 64)
+               up->port.type = PORT_16654;
+       else
+               up->port.type = PORT_16650V2;
+}
+
+/*
+ * We detected a chip without a FIFO.  Only two fall into
+ * this category - the original 8250 and the 16450.  The
+ * 16450 has a scratch register (accessible with LCR=0)
+ */
+static void autoconfig_8250(struct uart_8250_port *up)
+{
+       unsigned char scratch, status1, status2;
+
+       up->port.type = PORT_8250;
+
+       scratch = serial_in(up, UART_SCR);
+       serial_out(up, UART_SCR, 0xa5);
+       status1 = serial_in(up, UART_SCR);
+       serial_out(up, UART_SCR, 0x5a);
+       status2 = serial_in(up, UART_SCR);
+       serial_out(up, UART_SCR, scratch);
+
+       if (status1 == 0xa5 && status2 == 0x5a)
+               up->port.type = PORT_16450;
+}
+
+static int broken_efr(struct uart_8250_port *up)
+{
+       /*
+        * Exar ST16C2550 "A2" devices incorrectly detect as
+        * having an EFR, and report an ID of 0x0201.  See
+        * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html 
+        */
+       if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
+               return 1;
+
+       return 0;
+}
+
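+/*
+ * Switch a National Semiconductor SuperIO UART into high-speed mode via
+ * EXCR2.  Returns 0 if the port is already in high-speed mode, 1 after
+ * switching so the caller can rescale the divisor.
+ */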
+static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
+{
+       unsigned char status;
+
+       status = serial_in(up, 0x04); /* EXCR2 */
+#define PRESL(x) ((x) & 0x30)
+       if (PRESL(status) == 0x10) {
+               /* already in high speed mode */
+               return 0;
+       } else {
+               status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
+               status |= 0x10;  /* 1.625 divisor for baud_base --> 921600 */
+               serial_out(up, 0x04, status);
+       }
+       return 1;
+}
+
+/*
+ * We know that the chip has FIFOs.  Does it have an EFR?  The
+ * EFR is located in the same register position as the IIR and
+ * we know the top two bits of the IIR are currently set.  The
+ * EFR should contain zero.  Try to read the EFR.
+ */
+static void autoconfig_16550a(struct uart_8250_port *up)
+{
+       unsigned char status1, status2;
+       unsigned int iersave;
+
+       up->port.type = PORT_16550A;
+       up->capabilities |= UART_CAP_FIFO;
+
+       /*
+        * XR17V35x UARTs have an extra divisor register, DLD, that gets
+        * enabled when DLAB is set, which will cause the device to
+        * incorrectly match and be assigned the port type PORT_16650.
+        * The EFR for this UART is found at offset 0x09.  Instead, check
+        * the Device ID (DVID) register for a 2, 4 or 8 port UART.
+        */
+       if (up->port.flags & UPF_EXAR_EFR) {
+               status1 = serial_in(up, UART_EXAR_DVID);
+               if (status1 == 0x82 || status1 == 0x84 || status1 == 0x88) {
+                       DEBUG_AUTOCONF("Exar XR17V35x ");
+                       up->port.type = PORT_XR17V35X;
+                       up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
+                                               UART_CAP_SLEEP;
+
+                       return;
+               }
+
+       }
+
+       /*
+        * Check for presence of the EFR when DLAB is set.
+        * Only ST16C650V1 UARTs pass this test.
+        */
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+       if (serial_in(up, UART_EFR) == 0) {
+               serial_out(up, UART_EFR, 0xA8);
+               if (serial_in(up, UART_EFR) != 0) {
+                       DEBUG_AUTOCONF("EFRv1 ");
+                       up->port.type = PORT_16650;
+                       up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
+               } else {
+                       DEBUG_AUTOCONF("Motorola 8xxx DUART ");
+               }
+               serial_out(up, UART_EFR, 0);
+               return;
+       }
+
+       /*
+        * Maybe it requires 0xbf to be written to the LCR.
+        * (other ST16C650V2 UARTs, TI16C752A, etc)
+        */
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+       if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
+               DEBUG_AUTOCONF("EFRv2 ");
+               autoconfig_has_efr(up);
+               return;
+       }
+
+       /*
+        * Check for a National Semiconductor SuperIO chip.
+        * Attempt to switch to bank 2, read the value of the LOOP bit
+        * from EXCR1. Switch back to bank 0, change it in MCR. Then
+        * switch back to bank 2, read it from EXCR1 again and check
+        * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
+        */
+       serial_out(up, UART_LCR, 0);
+       status1 = serial_in(up, UART_MCR);
+       serial_out(up, UART_LCR, 0xE0);
+       status2 = serial_in(up, 0x02); /* EXCR1 */
+
+       if (!((status2 ^ status1) & UART_MCR_LOOP)) {
+               serial_out(up, UART_LCR, 0);
+               serial_out(up, UART_MCR, status1 ^ UART_MCR_LOOP);
+               serial_out(up, UART_LCR, 0xE0);
+               status2 = serial_in(up, 0x02); /* EXCR1 */
+               serial_out(up, UART_LCR, 0);
+               serial_out(up, UART_MCR, status1);
+
+               if ((status2 ^ status1) & UART_MCR_LOOP) {
+                       unsigned short quot;
+
+                       serial_out(up, UART_LCR, 0xE0);
+
+                       quot = serial_dl_read(up);
+                       quot <<= 3;
+
+                       if (ns16550a_goto_highspeed(up))
+                               serial_dl_write(up, quot);
+
+                       serial_out(up, UART_LCR, 0);
+
+                       up->port.uartclk = 921600*16;
+                       up->port.type = PORT_NS16550A;
+                       up->capabilities |= UART_NATSEMI;
+                       return;
+               }
+       }
+
+       /*
+        * No EFR.  Try to detect a TI16750, which only sets bit 5 of
+        * the IIR when 64 byte FIFO mode is enabled while DLAB is set.
+        * Try setting it with and without DLAB set.  Cheap clones
+        * set bit 5 without DLAB set.
+        */
+       serial_out(up, UART_LCR, 0);
+       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+       status1 = serial_in(up, UART_IIR) >> 5;
+       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+       status2 = serial_in(up, UART_IIR) >> 5;
+       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+       serial_out(up, UART_LCR, 0);
+
+       DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);
+
+       if (status1 == 6 && status2 == 7) {
+               up->port.type = PORT_16750;
+               up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
+               return;
+       }
+
+       /*
+        * Try writing and reading the UART_IER_UUE bit (b6).
+        * If it works, this is probably one of the Xscale platform's
+        * internal UARTs.
+        * We're going to explicitly set the UUE bit to 0 before
+        * trying to write and read a 1 just to make sure it's not
+        * already a 1 and maybe locked there before we even start.
+        */
+       iersave = serial_in(up, UART_IER);
+       serial_out(up, UART_IER, iersave & ~UART_IER_UUE);
+       if (!(serial_in(up, UART_IER) & UART_IER_UUE)) {
+               /*
+                * OK it's in a known zero state, try writing and reading
+                * without disturbing the current state of the other bits.
+                */
+               serial_out(up, UART_IER, iersave | UART_IER_UUE);
+               if (serial_in(up, UART_IER) & UART_IER_UUE) {
+                       /*
+                        * It's an Xscale.
+                        * We'll leave the UART_IER_UUE bit set to 1 (enabled).
+                        */
+                       DEBUG_AUTOCONF("Xscale ");
+                       up->port.type = PORT_XSCALE;
+                       up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE;
+                       return;
+               }
+       } else {
+               /*
+                * If we got here we couldn't force the IER_UUE bit to 0.
+                * Log it and continue.
+                */
+               DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 ");
+       }
+       serial_out(up, UART_IER, iersave);
+
+       /*
+        * Exar uarts have EFR in a weird location
+        */
+       if (up->port.flags & UPF_EXAR_EFR) {
+               DEBUG_AUTOCONF("Exar XR17D15x ");
+               up->port.type = PORT_XR17D15X;
+               up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
+                                   UART_CAP_SLEEP;
+
+               return;
+       }
+
+       /*
+        * We distinguish between 16550A and U6 16550A by counting
+        * how many bytes are in the FIFO.
+        */
+       if (up->port.type == PORT_16550A && size_fifo(up) == 64) {
+               up->port.type = PORT_U6_16550A;
+               up->capabilities |= UART_CAP_AFE;
+       }
+}
+
+/*
+ * This routine is called by rs_init() to initialize a specific serial
+ * port.  It determines what type of UART chip this serial port is
+ * using: 8250, 16450, 16550, 16550A.  The important question is
+ * whether or not this UART is a 16550A, since that determines
+ * whether we can use its FIFO features.
+ */
+static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
+{
+       unsigned char status1, scratch, scratch2, scratch3;
+       unsigned char save_lcr, save_mcr;
+       struct uart_port *port = &up->port;
+       unsigned long flags;
+       unsigned int old_capabilities;
+
+       if (!port->iobase && !port->mapbase && !port->membase)
+               return;
+
+       DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
+                      serial_index(port), port->iobase, port->membase);
+
+       /*
+        * We really do need global IRQs disabled here - we're going to
+        * be frobbing the chip's IRQ enable register to see if it exists.
+        */
+       spin_lock_irqsave(&port->lock, flags);
+
+       up->capabilities = 0;
+       up->bugs = 0;
+
+       if (!(port->flags & UPF_BUGGY_UART)) {
+               /*
+                * Do a simple existence test first; if we fail this,
+                * there's no point trying anything else.
+                *
+                * 0x80 is used as a nonsense port to protect against
+                * false positives due to ISA bus float.  The
+                * assumption is that 0x80 is a non-existent port;
+                * which should be safe since include/asm/io.h also
+                * makes this assumption.
+                *
+                * Note: this is safe as long as MCR bit 4 is clear
+                * and the device is in "PC" mode.
+                */
+               scratch = serial_in(up, UART_IER);
+               serial_out(up, UART_IER, 0);
+#ifdef __i386__
+               outb(0xff, 0x080);
+#endif
+               /*
+                * Mask out the IER[7:4] bits for the test, as some UARTs
+                * (e.g. TL16C754B) only allow them to be modified if an
+                * EFR bit is set.
+                */
+               scratch2 = serial_in(up, UART_IER) & 0x0f;
+               serial_out(up, UART_IER, 0x0F);
+#ifdef __i386__
+               outb(0, 0x080);
+#endif
+               scratch3 = serial_in(up, UART_IER) & 0x0f;
+               serial_out(up, UART_IER, scratch);
+               if (scratch2 != 0 || scratch3 != 0x0F) {
+                       /*
+                        * We failed; there's nothing here
+                        */
+                       spin_unlock_irqrestore(&port->lock, flags);
+                       DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
+                                      scratch2, scratch3);
+                       goto out;
+               }
+       }
+
+       save_mcr = serial_in(up, UART_MCR);
+       save_lcr = serial_in(up, UART_LCR);
+
+       /*
+        * Check to see if a UART is really there.  Certain broken
+        * internal modems based on the Rockwell chipset fail this
+        * test, because they apparently don't implement the loopback
+        * test mode.  So this test is skipped on the COM 1 through
+        * COM 4 ports.  This *should* be safe, since no board
+        * manufacturer would be stupid enough to design a board
+        * that conflicts with COM 1-4 --- we hope!
+        */
+       if (!(port->flags & UPF_SKIP_TEST)) {
+               serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+               status1 = serial_in(up, UART_MSR) & 0xF0;
+               serial_out(up, UART_MCR, save_mcr);
+               if (status1 != 0x90) {
+                       spin_unlock_irqrestore(&port->lock, flags);
+                       DEBUG_AUTOCONF("LOOP test failed (%02x) ",
+                                      status1);
+                       goto out;
+               }
+       }
+
+       /*
+        * We're pretty sure there's a port here.  Let's find out what
+        * type of port it is.  The top two bits of the IIR allow us to
+        * find out if it's an 8250 or 16450, 16550, 16550A or later.  This
+        * determines what we test for next.
+        *
+        * We also initialise the EFR (if any) to zero for later.  The
+        * EFR occupies the same register location as the FCR and IIR.
+        */
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+       serial_out(up, UART_EFR, 0);
+       serial_out(up, UART_LCR, 0);
+
+       serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+       scratch = serial_in(up, UART_IIR) >> 6;
+
+       switch (scratch) {
+       case 0:
+               autoconfig_8250(up);
+               break;
+       case 1:
+               port->type = PORT_UNKNOWN;
+               break;
+       case 2:
+               port->type = PORT_16550;
+               break;
+       case 3:
+               autoconfig_16550a(up);
+               break;
+       }
+
+#ifdef CONFIG_SERIAL_8250_RSA
+       /*
+        * Only probe for RSA ports if we got the region.
+        */
+       if (port->type == PORT_16550A && probeflags & PROBE_RSA) {
+               int i;
+
+               for (i = 0 ; i < probe_rsa_count; ++i) {
+                       if (probe_rsa[i] == port->iobase && __enable_rsa(up)) {
+                               port->type = PORT_RSA;
+                               break;
+                       }
+               }
+       }
+#endif
+
+       serial_out(up, UART_LCR, save_lcr);
+
+       port->fifosize = uart_config[up->port.type].fifo_size;
+       old_capabilities = up->capabilities; 
+       up->capabilities = uart_config[port->type].flags;
+       up->tx_loadsz = uart_config[port->type].tx_loadsz;
+
+       if (port->type == PORT_UNKNOWN)
+               goto out_lock;
+
+       /*
+        * Reset the UART.
+        */
+#ifdef CONFIG_SERIAL_8250_RSA
+       if (port->type == PORT_RSA)
+               serial_out(up, UART_RSA_FRR, 0);
+#endif
+       serial_out(up, UART_MCR, save_mcr);
+       serial8250_clear_fifos(up);
+       serial_in(up, UART_RX);
+       if (up->capabilities & UART_CAP_UUE)
+               serial_out(up, UART_IER, UART_IER_UUE);
+       else
+               serial_out(up, UART_IER, 0);
+
+out_lock:
+       spin_unlock_irqrestore(&port->lock, flags);
+       if (up->capabilities != old_capabilities) {
+               printk(KERN_WARNING
+                      "ttyS%d: detected caps %08x should be %08x\n",
+                      serial_index(port), old_capabilities,
+                      up->capabilities);
+       }
+out:
+       DEBUG_AUTOCONF("iir=%d ", scratch);
+       DEBUG_AUTOCONF("type=%s\n", uart_config[port->type].name);
+}
+
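+/*
+ * Detect which IRQ line the port uses: enable all UART interrupt sources,
+ * provoke a transmit interrupt and let probe_irq_on()/probe_irq_off()
+ * report the line that fired.
+ */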
+static void autoconfig_irq(struct uart_8250_port *up)
+{
+       struct uart_port *port = &up->port;
+       unsigned char save_mcr, save_ier;
+       unsigned char save_ICP = 0;
+       unsigned int ICP = 0;
+       unsigned long irqs;
+       int irq;
+
+       if (port->flags & UPF_FOURPORT) {
+               ICP = (port->iobase & 0xfe0) | 0x1f;
+               save_ICP = inb_p(ICP);
+               outb_p(0x80, ICP);
+               inb_p(ICP);
+       }
+
+       /* forget possible initially masked and pending IRQ */
+       probe_irq_off(probe_irq_on());
+       save_mcr = serial_in(up, UART_MCR);
+       save_ier = serial_in(up, UART_IER);
+       serial_out(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
+
+       irqs = probe_irq_on();
+       serial_out(up, UART_MCR, 0);
+       udelay(10);
+       if (port->flags & UPF_FOURPORT) {
+               serial_out(up, UART_MCR,
+                           UART_MCR_DTR | UART_MCR_RTS);
+       } else {
+               serial_out(up, UART_MCR,
+                           UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
+       }
+       serial_out(up, UART_IER, 0x0f); /* enable all intrs */
+       serial_in(up, UART_LSR);
+       serial_in(up, UART_RX);
+       serial_in(up, UART_IIR);
+       serial_in(up, UART_MSR);
+       serial_out(up, UART_TX, 0xFF);
+       udelay(20);
+       irq = probe_irq_off(irqs);
+
+       serial_out(up, UART_MCR, save_mcr);
+       serial_out(up, UART_IER, save_ier);
+
+       if (port->flags & UPF_FOURPORT)
+               outb_p(save_ICP, ICP);
+
+       port->irq = (irq > 0) ? irq : 0;
+}
+
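+/* Disable the transmit-holding-register-empty interrupt if it is enabled. */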
+static inline void __stop_tx(struct uart_8250_port *p)
+{
+       if (p->ier & UART_IER_THRI) {
+               p->ier &= ~UART_IER_THRI;
+               serial_out(p, UART_IER, p->ier);
+       }
+}
+
+static void serial8250_stop_tx(struct uart_port *port)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+
+       __stop_tx(up);
+
+       /*
+        * We really want to stop the transmitter from sending.
+        */
+       if (port->type == PORT_16C950) {
+               up->acr |= UART_ACR_TXDIS;
+               serial_icr_write(up, UART_ACR, up->acr);
+       }
+}
+
+static void serial8250_start_tx(struct uart_port *port)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+
+       if (up->dma && !serial8250_tx_dma(up)) {
+               return;
+       } else if (!(up->ier & UART_IER_THRI)) {
+               up->ier |= UART_IER_THRI;
+               serial_port_out(port, UART_IER, up->ier);
+
+               if (up->bugs & UART_BUG_TXEN) {
+                       unsigned char lsr;
+                       lsr = serial_in(up, UART_LSR);
+                       up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+                       if (lsr & UART_LSR_TEMT)
+                               serial8250_tx_chars(up);
+               }
+       }
+
+       /*
+        * Re-enable the transmitter if we disabled it.
+        */
+       if (port->type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
+               up->acr &= ~UART_ACR_TXDIS;
+               serial_icr_write(up, UART_ACR, up->acr);
+       }
+}
+
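+/* Disable receiver line-status interrupts and mask DR out of read_status_mask. */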
+static void serial8250_stop_rx(struct uart_port *port)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+
+       up->ier &= ~UART_IER_RLSI;
+       up->port.read_status_mask &= ~UART_LSR_DR;
+       serial_port_out(port, UART_IER, up->ier);
+}
+
+static void serial8250_enable_ms(struct uart_port *port)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+
+       /* no MSR capabilities */
+       if (up->bugs & UART_BUG_NOMSR)
+               return;
+
+       up->ier |= UART_IER_MSI;
+       serial_port_out(port, UART_IER, up->ier);
+}
+
+/*
+ * serial8250_rx_chars: processes received characters according to the
+ * passed-in LSR value, and returns the remaining LSR bits not handled
+ * by this Rx routine.
+ */
+unsigned char
+serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
+{
+       struct uart_port *port = &up->port;
+       unsigned char ch;
+       int max_count = 256;
+       char flag;
+
+       do {
+               if (likely(lsr & UART_LSR_DR))
+                       ch = serial_in(up, UART_RX);
+               else
+                       /*
+                        * Intel 82571 has a Serial Over LAN device that will
+                        * set UART_LSR_BI without setting UART_LSR_DR when
+                        * it receives a break. To avoid reading from the
+                        * receive buffer without UART_LSR_DR bit set, we
+                        * just force the read character to be 0
+                        */
+                       ch = 0;
+
+               flag = TTY_NORMAL;
+               port->icount.rx++;
+
+               lsr |= up->lsr_saved_flags;
+               up->lsr_saved_flags = 0;
+
+               if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
+                       if (lsr & UART_LSR_BI) {
+                               lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+                               port->icount.brk++;
+                               /*
+                                * We do the SysRQ and SAK checking
+                                * here because otherwise the break
+                                * may get masked by ignore_status_mask
+                                * or read_status_mask.
+                                */
+                               if (uart_handle_break(port))
+                                       goto ignore_char;
+                       } else if (lsr & UART_LSR_PE)
+                               port->icount.parity++;
+                       else if (lsr & UART_LSR_FE)
+                               port->icount.frame++;
+                       if (lsr & UART_LSR_OE)
+                               port->icount.overrun++;
+
+                       /*
+                        * Mask off conditions which should be ignored.
+                        */
+                       lsr &= port->read_status_mask;
+
+                       if (lsr & UART_LSR_BI) {
+                               DEBUG_INTR("handling break....");
+                               flag = TTY_BREAK;
+                       } else if (lsr & UART_LSR_PE)
+                               flag = TTY_PARITY;
+                       else if (lsr & UART_LSR_FE)
+                               flag = TTY_FRAME;
+               }
+               if (uart_handle_sysrq_char(port, ch))
+                       goto ignore_char;
+
+               uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
+
+ignore_char:
+               lsr = serial_in(up, UART_LSR);
+       } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
+       spin_unlock(&port->lock);
+       tty_flip_buffer_push(&port->state->port);
+       spin_lock(&port->lock);
+       return lsr;
+}
+EXPORT_SYMBOL_GPL(serial8250_rx_chars);
+
+void serial8250_tx_chars(struct uart_8250_port *up)
+{
+       struct uart_port *port = &up->port;
+       struct circ_buf *xmit = &port->state->xmit;
+       int count;
+
+       if (port->x_char) {
+               serial_out(up, UART_TX, port->x_char);
+               port->icount.tx++;
+               port->x_char = 0;
+               return;
+       }
+       if (uart_tx_stopped(port)) {
+               serial8250_stop_tx(port);
+               return;
+       }
+       if (uart_circ_empty(xmit)) {
+               __stop_tx(up);
+               return;
+       }
+
+       count = up->tx_loadsz;
+       do {
+               serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+               port->icount.tx++;
+               if (uart_circ_empty(xmit))
+                       break;
+               if (up->capabilities & UART_CAP_HFIFO) {
+                       if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
+                           BOTH_EMPTY)
+                               break;
+               }
+       } while (--count > 0);
+
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(port);
+
+       DEBUG_INTR("THRE...");
+
+       if (uart_circ_empty(xmit))
+               __stop_tx(up);
+}
+EXPORT_SYMBOL_GPL(serial8250_tx_chars);
+
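+/* Read the MSR, fold in any saved delta flags and account for changes. */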
+unsigned int serial8250_modem_status(struct uart_8250_port *up)
+{
+       struct uart_port *port = &up->port;
+       unsigned int status = serial_in(up, UART_MSR);
+
+       status |= up->msr_saved_flags;
+       up->msr_saved_flags = 0;
+       if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
+           port->state != NULL) {
+               if (status & UART_MSR_TERI)
+                       port->icount.rng++;
+               if (status & UART_MSR_DDSR)
+                       port->icount.dsr++;
+               if (status & UART_MSR_DDCD)
+                       uart_handle_dcd_change(port, status & UART_MSR_DCD);
+               if (status & UART_MSR_DCTS)
+                       uart_handle_cts_change(port, status & UART_MSR_CTS);
+
+               wake_up_interruptible(&port->state->port.delta_msr_wait);
+       }
+
+       return status;
+}
+EXPORT_SYMBOL_GPL(serial8250_modem_status);
+
+/*
+ * This handles the interrupt from one port.
+ */
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+{
+       unsigned char status;
+       unsigned long flags;
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       int dma_err = 0;
+
+       if (iir & UART_IIR_NO_INT)
+               return 0;
+
+       spin_lock_irqsave(&port->lock, flags);
+
+       status = serial_port_in(port, UART_LSR);
+
+       DEBUG_INTR("status = %x...", status);
+
+       if (status & (UART_LSR_DR | UART_LSR_BI)) {
+               if (up->dma)
+                       dma_err = serial8250_rx_dma(up, iir);
+
+               if (!up->dma || dma_err)
+                       status = serial8250_rx_chars(up, status);
+       }
+       serial8250_modem_status(up);
+       if (status & UART_LSR_THRE)
+               serial8250_tx_chars(up);
+
+       spin_unlock_irqrestore(&port->lock, flags);
+       return 1;
+}
+EXPORT_SYMBOL_GPL(serial8250_handle_irq);
+
+static int serial8250_default_handle_irq(struct uart_port *port)
+{
+       unsigned int iir = serial_port_in(port, UART_IIR);
+
+       return serial8250_handle_irq(port, iir);
+}
+
+/*
+ * These Exar UARTs have an extra interrupt indicator that could
+ * fire for a few unimplemented interrupts, one of which is a
+ * wakeup event when coming out of sleep.  Read the extra interrupt
+ * registers here just to be on the safe side, so that these
+ * interrupts don't go unhandled.
+ */
+static int exar_handle_irq(struct uart_port *port)
+{
+       unsigned char int0, int1, int2, int3;
+       unsigned int iir = serial_port_in(port, UART_IIR);
+       int ret;
+
+       ret = serial8250_handle_irq(port, iir);
+
+       if ((port->type == PORT_XR17V35X) ||
+          (port->type == PORT_XR17D15X)) {
+               int0 = serial_port_in(port, 0x80);
+               int1 = serial_port_in(port, 0x81);
+               int2 = serial_port_in(port, 0x82);
+               int3 = serial_port_in(port, 0x83);
+       }
+
+       return ret;
+}
+
+/*
+ * This is the serial driver's interrupt routine.
+ *
+ * Arjan thinks the old way was overly complex, so it got simplified.
+ * Alan disagrees, saying that we need the complexity to handle the weird
+ * nature of ISA shared interrupts.  (This is a special exception.)
+ *
+ * In order to handle ISA shared interrupts properly, we need to check
+ * that all ports have been serviced, and therefore the ISA interrupt
+ * line has been de-asserted.
+ *
+ * This means we need to loop through all ports, checking that they
+ * don't have an interrupt pending.
+ */
+static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
+{
+       struct irq_info *i = dev_id;
+       struct list_head *l, *end = NULL;
+       int pass_counter = 0, handled = 0;
+
+       DEBUG_INTR("serial8250_interrupt(%d)...", irq);
+
+       spin_lock(&i->lock);
+
+       l = i->head;
+       do {
+               struct uart_8250_port *up;
+               struct uart_port *port;
+
+               up = list_entry(l, struct uart_8250_port, list);
+               port = &up->port;
+
+               if (port->handle_irq(port)) {
+                       handled = 1;
+                       end = NULL;
+               } else if (end == NULL)
+                       end = l;
+
+               l = l->next;
+
+               if (l == i->head && pass_counter++ > PASS_LIMIT) {
+                       /* If we hit this, we're dead. */
+                       printk_ratelimited(KERN_ERR
+                               "serial8250: too much work for irq%d\n", irq);
+                       break;
+               }
+       } while (l != end);
+
+       spin_unlock(&i->lock);
+
+       DEBUG_INTR("end.\n");
+
+       return IRQ_RETVAL(handled);
+}
+
+/*
+ * To support ISA shared interrupts, we need to have one interrupt
+ * handler that ensures that the IRQ line has been deasserted
+ * before returning.  Failing to do this will result in the IRQ
+ * line being stuck active, and, since ISA irqs are edge triggered,
+ * no more IRQs will be seen.
+ */
+static void serial_do_unlink(struct irq_info *i, struct uart_8250_port *up)
+{
+       spin_lock_irq(&i->lock);
+
+       if (!list_empty(i->head)) {
+               if (i->head == &up->list)
+                       i->head = i->head->next;
+               list_del(&up->list);
+       } else {
+               BUG_ON(i->head != &up->list);
+               i->head = NULL;
+       }
+       spin_unlock_irq(&i->lock);
+       /* List empty so throw away the hash node */
+       if (i->head == NULL) {
+               hlist_del(&i->node);
+               kfree(i);
+       }
+}
+
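+/*
+ * Add this port to the per-IRQ list; the shared interrupt handler is only
+ * requested when this port is the first user of that IRQ.
+ */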
+static int serial_link_irq_chain(struct uart_8250_port *up)
+{
+       struct hlist_head *h;
+       struct hlist_node *n;
+       struct irq_info *i;
+       int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
+
+       mutex_lock(&hash_mutex);
+
+       h = &irq_lists[up->port.irq % NR_IRQ_HASH];
+
+       hlist_for_each(n, h) {
+               i = hlist_entry(n, struct irq_info, node);
+               if (i->irq == up->port.irq)
+                       break;
+       }
+
+       if (n == NULL) {
+               i = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
+               if (i == NULL) {
+                       mutex_unlock(&hash_mutex);
+                       return -ENOMEM;
+               }
+               spin_lock_init(&i->lock);
+               i->irq = up->port.irq;
+               hlist_add_head(&i->node, h);
+       }
+       mutex_unlock(&hash_mutex);
+
+       spin_lock_irq(&i->lock);
+
+       if (i->head) {
+               list_add(&up->list, i->head);
+               spin_unlock_irq(&i->lock);
+
+               ret = 0;
+       } else {
+               INIT_LIST_HEAD(&up->list);
+               i->head = &up->list;
+               spin_unlock_irq(&i->lock);
+               irq_flags |= up->port.irqflags;
+               ret = request_irq(up->port.irq, serial8250_interrupt,
+                                 irq_flags, "serial", i);
+               if (ret < 0)
+                       serial_do_unlink(i, up);
+       }
+
+       return ret;
+}
+
+static void serial_unlink_irq_chain(struct uart_8250_port *up)
+{
+       struct irq_info *i;
+       struct hlist_node *n;
+       struct hlist_head *h;
+
+       mutex_lock(&hash_mutex);
+
+       h = &irq_lists[up->port.irq % NR_IRQ_HASH];
+
+       hlist_for_each(n, h) {
+               i = hlist_entry(n, struct irq_info, node);
+               if (i->irq == up->port.irq)
+                       break;
+       }
+
+       BUG_ON(n == NULL);
+       BUG_ON(i->head == NULL);
+
+       if (list_empty(i->head))
+               free_irq(up->port.irq, i);
+
+       serial_do_unlink(i, up);
+       mutex_unlock(&hash_mutex);
+}
+
+/*
+ * This function is used to handle ports that do not have an
+ * interrupt.  This doesn't work very well for 16450s, but gives
+ * barely passable results for a 16550A (although at the expense
+ * of much CPU overhead).
+ */
+static void serial8250_timeout(unsigned long data)
+{
+       struct uart_8250_port *up = (struct uart_8250_port *)data;
+
+       up->port.handle_irq(&up->port);
+       mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
+}
+
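+/*
+ * Backup timer for ports with an unreliable THRE interrupt: poll IIR/LSR
+ * and push out transmit characters if an interrupt appears to have been
+ * missed.
+ */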
+static void serial8250_backup_timeout(unsigned long data)
+{
+       struct uart_8250_port *up = (struct uart_8250_port *)data;
+       unsigned int iir, ier = 0, lsr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&up->port.lock, flags);
+
+       /*
+        * Must disable interrupts or else we risk racing with the interrupt
+        * based handler.
+        */
+       if (up->port.irq) {
+               ier = serial_in(up, UART_IER);
+               serial_out(up, UART_IER, 0);
+       }
+
+       iir = serial_in(up, UART_IIR);
+
+       /*
+        * This should be a safe test for anyone who doesn't trust the
+        * IIR bits on their UART, but it's specifically designed for
+        * the "Diva" UART used on the management processor on many HP
+        * ia64 and parisc boxes.
+        */
+       lsr = serial_in(up, UART_LSR);
+       up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+       if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
+           (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
+           (lsr & UART_LSR_THRE)) {
+               iir &= ~(UART_IIR_ID | UART_IIR_NO_INT);
+               iir |= UART_IIR_THRI;
+       }
+
+       if (!(iir & UART_IIR_NO_INT))
+               serial8250_tx_chars(up);
+
+       if (up->port.irq)
+               serial_out(up, UART_IER, ier);
+
+       spin_unlock_irqrestore(&up->port.lock, flags);
+
+       /* Standard timer interval plus 0.2s to keep the port running */
+       mod_timer(&up->timer,
+               jiffies + uart_poll_timeout(&up->port) + HZ / 5);
+}
+
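+/* Report TIOCSER_TEMT once both the THR and the transmitter are empty. */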
+static unsigned int serial8250_tx_empty(struct uart_port *port)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned long flags;
+       unsigned int lsr;
+
+       spin_lock_irqsave(&port->lock, flags);
+       lsr = serial_port_in(port, UART_LSR);
+       up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+}
+
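+/* Translate the current MSR bits into TIOCM_* modem-control flags. */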
+static unsigned int serial8250_get_mctrl(struct uart_port *port)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned int status;
+       unsigned int ret;
+
+       status = serial8250_modem_status(up);
+
+       ret = 0;
+       if (status & UART_MSR_DCD)
+               ret |= TIOCM_CAR;
+       if (status & UART_MSR_RI)
+               ret |= TIOCM_RNG;
+       if (status & UART_MSR_DSR)
+               ret |= TIOCM_DSR;
+       if (status & UART_MSR_CTS)
+               ret |= TIOCM_CTS;
+       return ret;
+}
+
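+/* Map TIOCM_* bits onto MCR, honouring the port's forced and masked bits. */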
+static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned char mcr = 0;
+
+       if (mctrl & TIOCM_RTS)
+               mcr |= UART_MCR_RTS;
+       if (mctrl & TIOCM_DTR)
+               mcr |= UART_MCR_DTR;
+       if (mctrl & TIOCM_OUT1)
+               mcr |= UART_MCR_OUT1;
+       if (mctrl & TIOCM_OUT2)
+               mcr |= UART_MCR_OUT2;
+       if (mctrl & TIOCM_LOOP)
+               mcr |= UART_MCR_LOOP;
+
+       mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
+
+       serial_port_out(port, UART_MCR, mcr);
+}
+
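+/* Start or stop sending a break by toggling the LCR SBC bit. */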
+static void serial8250_break_ctl(struct uart_port *port, int break_state)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned long flags;
+
+       spin_lock_irqsave(&port->lock, flags);
+       if (break_state == -1)
+               up->lcr |= UART_LCR_SBC;
+       else
+               up->lcr &= ~UART_LCR_SBC;
+       serial_port_out(port, UART_LCR, up->lcr);
+       spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/*
+ *     Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+{
+       unsigned int status, tmout = 10000;
+
+       /* Wait up to 10ms for the character(s) to be sent. */
+       for (;;) {
+               status = serial_in(up, UART_LSR);
+
+               up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+
+               if ((status & bits) == bits)
+                       break;
+               if (--tmout == 0)
+                       break;
+               udelay(1);
+       }
+
+       /* Wait up to 1s for flow control if necessary */
+       if (up->port.flags & UPF_CONS_FLOW) {
+               unsigned int tmout;
+               for (tmout = 1000000; tmout; tmout--) {
+                       unsigned int msr = serial_in(up, UART_MSR);
+                       up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
+                       if (msr & UART_MSR_CTS)
+                               break;
+                       udelay(1);
+                       touch_nmi_watchdog();
+               }
+       }
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static int serial8250_get_poll_char(struct uart_port *port)
+{
+       unsigned char lsr = serial_port_in(port, UART_LSR);
+
+       if (!(lsr & UART_LSR_DR))
+               return NO_POLL_CHAR;
+
+       return serial_port_in(port, UART_RX);
+}
+
+
+static void serial8250_put_poll_char(struct uart_port *port,
+                        unsigned char c)
+{
+       unsigned int ier;
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+
+       /*
+        *      First save the IER then disable the interrupts
+        */
+       ier = serial_port_in(port, UART_IER);
+       if (up->capabilities & UART_CAP_UUE)
+               serial_port_out(port, UART_IER, UART_IER_UUE);
+       else
+               serial_port_out(port, UART_IER, 0);
+
+       wait_for_xmitr(up, BOTH_EMPTY);
+       /*
+        *      Send the character out.
+        *      If a LF, also do CR...
+        */
+       serial_port_out(port, UART_TX, c);
+       if (c == 10) {
+               wait_for_xmitr(up, BOTH_EMPTY);
+               serial_port_out(port, UART_TX, 13);
+       }
+
+       /*
+        *      Finally, wait for transmitter to become empty
+        *      and restore the IER
+        */
+       wait_for_xmitr(up, BOTH_EMPTY);
+       serial_port_out(port, UART_IER, ier);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+static int serial8250_startup(struct uart_port *port)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned long flags;
+       unsigned char lsr, iir;
+       int retval;
+
+       if (port->type == PORT_8250_CIR)
+               return -ENODEV;
+
+       if (!port->fifosize)
+               port->fifosize = uart_config[port->type].fifo_size;
+       if (!up->tx_loadsz)
+               up->tx_loadsz = uart_config[port->type].tx_loadsz;
+       if (!up->capabilities)
+               up->capabilities = uart_config[port->type].flags;
+       up->mcr = 0;
+
+       if (port->iotype != up->cur_iotype)
+               set_io_from_upio(port);
+
+       if (port->type == PORT_16C950) {
+               /* Wake up and initialize UART */
+               up->acr = 0;
+               serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+               serial_port_out(port, UART_EFR, UART_EFR_ECB);
+               serial_port_out(port, UART_IER, 0);
+               serial_port_out(port, UART_LCR, 0);
+               serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
+               serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+               serial_port_out(port, UART_EFR, UART_EFR_ECB);
+               serial_port_out(port, UART_LCR, 0);
+       }
+
+#ifdef CONFIG_SERIAL_8250_RSA
+       /*
+        * If this is an RSA port, see if we can kick it up to the
+        * higher speed clock.
+        */
+       enable_rsa(up);
+#endif
+
+       /*
+        * Clear the FIFO buffers and disable them.
+        * (they will be re-enabled in set_termios())
+        */
+       serial8250_clear_fifos(up);
+
+       /*
+        * Clear the interrupt registers.
+        */
+       serial_port_in(port, UART_LSR);
+       serial_port_in(port, UART_RX);
+       serial_port_in(port, UART_IIR);
+       serial_port_in(port, UART_MSR);
+
+       /*
+        * At this point, there's no way the LSR could still be 0xff;
+        * if it is, then bail out, because there's likely no UART
+        * here.
+        */
+       if (!(port->flags & UPF_BUGGY_UART) &&
+           (serial_port_in(port, UART_LSR) == 0xff)) {
+               printk_ratelimited(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
+                                  serial_index(port));
+               return -ENODEV;
+       }
+
+       /*
+        * For an XR16C850, we need to set the trigger levels.
+        */
+       if (port->type == PORT_16850) {
+               unsigned char fctr;
+
+               serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+               fctr = serial_in(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
+               serial_port_out(port, UART_FCTR,
+                               fctr | UART_FCTR_TRGD | UART_FCTR_RX);
+               serial_port_out(port, UART_TRG, UART_TRG_96);
+               serial_port_out(port, UART_FCTR,
+                               fctr | UART_FCTR_TRGD | UART_FCTR_TX);
+               serial_port_out(port, UART_TRG, UART_TRG_96);
+
+               serial_port_out(port, UART_LCR, 0);
+       }
+
+       if (port->irq) {
+               unsigned char iir1;
+               /*
+                * Test for UARTs that do not reassert THRE when the
+                * transmitter is idle and the interrupt has already
+                * been cleared.  Real 16550s should always reassert
+                * this interrupt whenever the transmitter is idle and
+                * the interrupt is enabled.  Delays are necessary to
+                * allow register changes to become visible.
+                */
+               spin_lock_irqsave(&port->lock, flags);
+               if (up->port.irqflags & IRQF_SHARED)
+                       disable_irq_nosync(port->irq);
+
+               wait_for_xmitr(up, UART_LSR_THRE);
+               serial_port_out_sync(port, UART_IER, UART_IER_THRI);
+               udelay(1); /* allow THRE to set */
+               iir1 = serial_port_in(port, UART_IIR);
+               serial_port_out(port, UART_IER, 0);
+               serial_port_out_sync(port, UART_IER, UART_IER_THRI);
+               udelay(1); /* allow a working UART time to re-assert THRE */
+               iir = serial_port_in(port, UART_IIR);
+               serial_port_out(port, UART_IER, 0);
+
+               if (port->irqflags & IRQF_SHARED)
+                       enable_irq(port->irq);
+               spin_unlock_irqrestore(&port->lock, flags);
+
+               /*
+                * If the interrupt is not reasserted, or we otherwise
+                * don't trust the IIR, set up a timer to kick the UART
+                * on a regular basis.
+                */
+               if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) ||
+                   up->port.flags & UPF_BUG_THRE) {
+                       up->bugs |= UART_BUG_THRE;
+                       pr_debug("ttyS%d - using backup timer\n",
+                                serial_index(port));
+               }
+       }
+
+       /*
+        * The above check will only give an accurate result the first time
+        * the port is opened, so this value needs to be preserved.
+        */
+       if (up->bugs & UART_BUG_THRE) {
+               up->timer.function = serial8250_backup_timeout;
+               up->timer.data = (unsigned long)up;
+               mod_timer(&up->timer, jiffies +
+                       uart_poll_timeout(port) + HZ / 5);
+       }
+
+       /*
+        * If the "interrupt" for this port doesn't correspond with any
+        * hardware interrupt, we use a timer-based system.  The original
+        * driver used to do this with IRQ0.
+        */
+       if (!port->irq) {
+               up->timer.data = (unsigned long)up;
+               mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
+       } else {
+               retval = serial_link_irq_chain(up);
+               if (retval)
+                       return retval;
+       }
+
+       /*
+        * Now, initialize the UART
+        */
+       serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
+
+       spin_lock_irqsave(&port->lock, flags);
+       if (up->port.flags & UPF_FOURPORT) {
+               if (!up->port.irq)
+                       up->port.mctrl |= TIOCM_OUT1;
+       } else
+               /*
+                * Most PC UARTs need OUT2 raised to enable interrupts.
+                */
+               if (port->irq)
+                       up->port.mctrl |= TIOCM_OUT2;
+
+       serial8250_set_mctrl(port, port->mctrl);
+
+       /*
+        * Serial over LAN (SoL) hack:
+        * Intel 8257x Gigabit Ethernet chips have a 16550 emulation, to be
+        * used for Serial over LAN.  Those chips take longer than a normal
+        * serial device to signal that transmit data has been queued, so the
+        * test below generally fails.  One solution would be to delay the
+        * reading of the IIR, but that is not reliable since the timeout is
+        * variable.  So just don't test whether we receive a TX irq; this
+        * way we will never enable UART_BUG_TXEN.
+        */
+       if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
+               goto dont_test_tx_en;
+
+       /*
+        * Do a quick test to see if we receive an
+        * interrupt when we enable the TX irq.
+        */
+       serial_port_out(port, UART_IER, UART_IER_THRI);
+       lsr = serial_port_in(port, UART_LSR);
+       iir = serial_port_in(port, UART_IIR);
+       serial_port_out(port, UART_IER, 0);
+
+       if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
+               if (!(up->bugs & UART_BUG_TXEN)) {
+                       up->bugs |= UART_BUG_TXEN;
+                       pr_debug("ttyS%d - enabling bad tx status workarounds\n",
+                                serial_index(port));
+               }
+       } else {
+               up->bugs &= ~UART_BUG_TXEN;
+       }
+
+dont_test_tx_en:
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       /*
+        * Clear the interrupt registers again for luck, and clear the
+        * saved flags to avoid getting false values from polling
+        * routines or the previous session.
+        */
+       serial_port_in(port, UART_LSR);
+       serial_port_in(port, UART_RX);
+       serial_port_in(port, UART_IIR);
+       serial_port_in(port, UART_MSR);
+       up->lsr_saved_flags = 0;
+       up->msr_saved_flags = 0;
+
+       /*
+        * Request DMA channels for both RX and TX.
+        */
+       if (up->dma) {
+               retval = serial8250_request_dma(up);
+               if (retval) {
+                       pr_warn_ratelimited("ttyS%d - failed to request DMA\n",
+                                           serial_index(port));
+                       up->dma = NULL;
+               }
+       }
+
+       /*
+        * Finally, enable interrupts.  Note: Modem status interrupts
+        * are set via set_termios(), which will occur imminently anyway,
+        * so we don't enable them here.
+        */
+       up->ier = UART_IER_RLSI | UART_IER_RDI;
+       serial_port_out(port, UART_IER, up->ier);
+
+       if (port->flags & UPF_FOURPORT) {
+               unsigned int icp;
+               /*
+                * Enable interrupts on the AST Fourport board
+                */
+               icp = (port->iobase & 0xfe0) | 0x01f;
+               outb_p(0x80, icp);
+               inb_p(icp);
+       }
+
+       return 0;
+}
+
+static void serial8250_shutdown(struct uart_port *port)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned long flags;
+
+       /*
+        * Disable interrupts from this port
+        */
+       up->ier = 0;
+       serial_port_out(port, UART_IER, 0);
+
+       if (up->dma)
+               serial8250_release_dma(up);
+
+       spin_lock_irqsave(&port->lock, flags);
+       if (port->flags & UPF_FOURPORT) {
+               /* reset interrupts on the AST Fourport board */
+               inb((port->iobase & 0xfe0) | 0x1f);
+               port->mctrl |= TIOCM_OUT1;
+       } else
+               port->mctrl &= ~TIOCM_OUT2;
+
+       serial8250_set_mctrl(port, port->mctrl);
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       /*
+        * Disable break condition and FIFOs
+        */
+       serial_port_out(port, UART_LCR,
+                       serial_port_in(port, UART_LCR) & ~UART_LCR_SBC);
+       serial8250_clear_fifos(up);
+
+#ifdef CONFIG_SERIAL_8250_RSA
+       /*
+        * Reset the RSA board back to 115kbps compat mode.
+        */
+       disable_rsa(up);
+#endif
+
+       /*
+        * Read data port to reset things, and then unlink from
+        * the IRQ chain.
+        */
+       serial_port_in(port, UART_RX);
+
+       del_timer_sync(&up->timer);
+       up->timer.function = serial8250_timeout;
+       if (port->irq)
+               serial_unlink_irq_chain(up);
+}
+
+static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int baud)
+{
+       unsigned int quot;
+
+       /*
+        * Handle magic divisors for baud rates above baud_base on
+        * SMSC SuperIO chips.
+        */
+       if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+           baud == (port->uartclk/4))
+               quot = 0x8001;
+       else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+                baud == (port->uartclk/8))
+               quot = 0x8002;
+       else
+               quot = uart_get_divisor(port, baud);
+
+       return quot;
+}
+
+void
+serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+                         struct ktermios *old)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned char cval, fcr = 0;
+       unsigned long flags;
+       unsigned int baud, quot;
+       int fifo_bug = 0;
+
+       switch (termios->c_cflag & CSIZE) {
+       case CS5:
+               cval = UART_LCR_WLEN5;
+               break;
+       case CS6:
+               cval = UART_LCR_WLEN6;
+               break;
+       case CS7:
+               cval = UART_LCR_WLEN7;
+               break;
+       default:
+       case CS8:
+               cval = UART_LCR_WLEN8;
+               break;
+       }
+
+       if (termios->c_cflag & CSTOPB)
+               cval |= UART_LCR_STOP;
+       if (termios->c_cflag & PARENB) {
+               cval |= UART_LCR_PARITY;
+               if (up->bugs & UART_BUG_PARITY)
+                       fifo_bug = 1;
+       }
+       if (!(termios->c_cflag & PARODD))
+               cval |= UART_LCR_EPAR;
+#ifdef CMSPAR
+       if (termios->c_cflag & CMSPAR)
+               cval |= UART_LCR_SPAR;
+#endif
+
+       /*
+        * Ask the core to calculate the divisor for us.
+        */
+       baud = uart_get_baud_rate(port, termios, old,
+                                 port->uartclk / 16 / 0xffff,
+                                 port->uartclk / 16);
+       quot = serial8250_get_divisor(port, baud);
+
+       /*
+        * Oxford Semi 952 rev B workaround
+        */
+       if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
+               quot++;
+
+       if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
+               fcr = uart_config[port->type].fcr;
+               if (baud < 2400 || fifo_bug) {
+                       fcr &= ~UART_FCR_TRIGGER_MASK;
+                       fcr |= UART_FCR_TRIGGER_1;
+               }
+       }
+
+       /*
+        * MCR-based auto flow control.  When AFE is enabled, RTS will be
+        * deasserted when the receive FIFO contains more characters than
+        * the trigger, or the MCR RTS bit is cleared.  In the case where
+        * the remote UART is not using CTS auto flow control, we must
+        * have sufficient FIFO entries for the latency of the remote
+        * UART to respond.  IOW, at least 32 bytes of FIFO.
+        */
+       if (up->capabilities & UART_CAP_AFE && port->fifosize >= 32) {
+               up->mcr &= ~UART_MCR_AFE;
+               if (termios->c_cflag & CRTSCTS)
+                       up->mcr |= UART_MCR_AFE;
+       }
+
+       /*
+        * Ok, we're now changing the port state.  Do it with
+        * interrupts disabled.
+        */
+       spin_lock_irqsave(&port->lock, flags);
+
+       /*
+        * Update the per-port timeout.
+        */
+       uart_update_timeout(port, termios->c_cflag, baud);
+
+       port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+       if (termios->c_iflag & INPCK)
+               port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+       if (termios->c_iflag & (BRKINT | PARMRK))
+               port->read_status_mask |= UART_LSR_BI;
+
+       /*
+        * Characters to ignore
+        */
+       port->ignore_status_mask = 0;
+       if (termios->c_iflag & IGNPAR)
+               port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+       if (termios->c_iflag & IGNBRK) {
+               port->ignore_status_mask |= UART_LSR_BI;
+               /*
+                * If we're ignoring parity and break indicators,
+                * ignore overruns too (for real raw support).
+                */
+               if (termios->c_iflag & IGNPAR)
+                       port->ignore_status_mask |= UART_LSR_OE;
+       }
+
+       /*
+        * ignore all characters if CREAD is not set
+        */
+       if ((termios->c_cflag & CREAD) == 0)
+               port->ignore_status_mask |= UART_LSR_DR;
+
+       /*
+        * CTS flow control flag and modem status interrupts
+        */
+       up->ier &= ~UART_IER_MSI;
+       if (!(up->bugs & UART_BUG_NOMSR) &&
+                       UART_ENABLE_MS(&up->port, termios->c_cflag))
+               up->ier |= UART_IER_MSI;
+       if (up->capabilities & UART_CAP_UUE)
+               up->ier |= UART_IER_UUE;
+       if (up->capabilities & UART_CAP_RTOIE)
+               up->ier |= UART_IER_RTOIE;
+
+       serial_port_out(port, UART_IER, up->ier);
+
+       if (up->capabilities & UART_CAP_EFR) {
+               unsigned char efr = 0;
+               /*
+                * TI16C752/Startech hardware flow control.  FIXME:
+                * - TI16C752 requires control thresholds to be set.
+                * - UART_MCR_RTS is ineffective if auto-RTS mode is enabled.
+                */
+               if (termios->c_cflag & CRTSCTS)
+                       efr |= UART_EFR_CTS;
+
+               serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+               if (port->flags & UPF_EXAR_EFR)
+                       serial_port_out(port, UART_XR_EFR, efr);
+               else
+                       serial_port_out(port, UART_EFR, efr);
+       }
+
+       /* Workaround to enable 115200 baud on OMAP1510 internal ports */
+       if (is_omap1510_8250(up)) {
+               if (baud == 115200) {
+                       quot = 1;
+                       serial_port_out(port, UART_OMAP_OSC_12M_SEL, 1);
+               } else
+                       serial_port_out(port, UART_OMAP_OSC_12M_SEL, 0);
+       }
+
+       /*
+        * For NatSemi, switch to bank 2 rather than bank 1 to avoid resetting
+        * EXCR2; otherwise just set DLAB.
+        */
+       if (up->capabilities & UART_NATSEMI)
+               serial_port_out(port, UART_LCR, 0xe0);
+       else
+               serial_port_out(port, UART_LCR, cval | UART_LCR_DLAB);
+
+       serial_dl_write(up, quot);
+
+       /*
+        * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
+        * is written without DLAB set, this mode will be disabled.
+        */
+       if (port->type == PORT_16750)
+               serial_port_out(port, UART_FCR, fcr);
+
+       serial_port_out(port, UART_LCR, cval);          /* reset DLAB */
+       up->lcr = cval;                                 /* Save LCR */
+       if (port->type != PORT_16750) {
+               /* emulated UARTs (Lucent Venus 167x) need two steps */
+               if (fcr & UART_FCR_ENABLE_FIFO)
+                       serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
+               serial_port_out(port, UART_FCR, fcr);           /* set fcr */
+       }
+       serial8250_set_mctrl(port, port->mctrl);
+       spin_unlock_irqrestore(&port->lock, flags);
+       /* Don't rewrite B0 */
+       if (tty_termios_baud_rate(termios))
+               tty_termios_encode_baud_rate(termios, baud, baud);
+}
+EXPORT_SYMBOL(serial8250_do_set_termios);
+
+static void
+serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
+                      struct ktermios *old)
+{
+       if (port->set_termios)
+               port->set_termios(port, termios, old);
+       else
+               serial8250_do_set_termios(port, termios, old);
+}
+
+static void
+serial8250_set_ldisc(struct uart_port *port, int new)
+{
+       if (new == N_PPS) {
+               port->flags |= UPF_HARDPPS_CD;
+               serial8250_enable_ms(port);
+       } else
+               port->flags &= ~UPF_HARDPPS_CD;
+}
+
+
+void serial8250_do_pm(struct uart_port *port, unsigned int state,
+                     unsigned int oldstate)
+{
+       struct uart_8250_port *p =
+               container_of(port, struct uart_8250_port, port);
+
+       serial8250_set_sleep(p, state != 0);
+}
+EXPORT_SYMBOL(serial8250_do_pm);
+
+static void
+serial8250_pm(struct uart_port *port, unsigned int state,
+             unsigned int oldstate)
+{
+       if (port->pm)
+               port->pm(port, state, oldstate);
+       else
+               serial8250_do_pm(port, state, oldstate);
+}
+
+static unsigned int serial8250_port_size(struct uart_8250_port *pt)
+{
+       if (pt->port.iotype == UPIO_AU)
+               return 0x1000;
+       if (is_omap1_8250(pt))
+               return 0x16 << pt->port.regshift;
+
+       return 8 << pt->port.regshift;
+}
+
+/*
+ * Resource handling.
+ */
+static int serial8250_request_std_resource(struct uart_8250_port *up)
+{
+       unsigned int size = serial8250_port_size(up);
+       struct uart_port *port = &up->port;
+       int ret = 0;
+
+       switch (port->iotype) {
+       case UPIO_AU:
+       case UPIO_TSI:
+       case UPIO_MEM32:
+       case UPIO_MEM:
+               if (!port->mapbase)
+                       break;
+
+               if (!request_mem_region(port->mapbase, size, "serial")) {
+                       ret = -EBUSY;
+                       break;
+               }
+
+               if (port->flags & UPF_IOREMAP) {
+                       port->membase = ioremap_nocache(port->mapbase, size);
+                       if (!port->membase) {
+                               release_mem_region(port->mapbase, size);
+                               ret = -ENOMEM;
+                       }
+               }
+               break;
+
+       case UPIO_HUB6:
+       case UPIO_PORT:
+               if (!request_region(port->iobase, size, "serial"))
+                       ret = -EBUSY;
+               break;
+       }
+       return ret;
+}
+
+static void serial8250_release_std_resource(struct uart_8250_port *up)
+{
+       unsigned int size = serial8250_port_size(up);
+       struct uart_port *port = &up->port;
+
+       switch (port->iotype) {
+       case UPIO_AU:
+       case UPIO_TSI:
+       case UPIO_MEM32:
+       case UPIO_MEM:
+               if (!port->mapbase)
+                       break;
+
+               if (port->flags & UPF_IOREMAP) {
+                       iounmap(port->membase);
+                       port->membase = NULL;
+               }
+
+               release_mem_region(port->mapbase, size);
+               break;
+
+       case UPIO_HUB6:
+       case UPIO_PORT:
+               release_region(port->iobase, size);
+               break;
+       }
+}
+
+static int serial8250_request_rsa_resource(struct uart_8250_port *up)
+{
+       unsigned long start = UART_RSA_BASE << up->port.regshift;
+       unsigned int size = 8 << up->port.regshift;
+       struct uart_port *port = &up->port;
+       int ret = -EINVAL;
+
+       switch (port->iotype) {
+       case UPIO_HUB6:
+       case UPIO_PORT:
+               start += port->iobase;
+               if (request_region(start, size, "serial-rsa"))
+                       ret = 0;
+               else
+                       ret = -EBUSY;
+               break;
+       }
+
+       return ret;
+}
+
+static void serial8250_release_rsa_resource(struct uart_8250_port *up)
+{
+       unsigned long offset = UART_RSA_BASE << up->port.regshift;
+       unsigned int size = 8 << up->port.regshift;
+       struct uart_port *port = &up->port;
+
+       switch (port->iotype) {
+       case UPIO_HUB6:
+       case UPIO_PORT:
+               release_region(port->iobase + offset, size);
+               break;
+       }
+}
+
+static void serial8250_release_port(struct uart_port *port)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+
+       serial8250_release_std_resource(up);
+       if (port->type == PORT_RSA)
+               serial8250_release_rsa_resource(up);
+}
+
+static int serial8250_request_port(struct uart_port *port)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       int ret;
+
+       if (port->type == PORT_8250_CIR)
+               return -ENODEV;
+
+       ret = serial8250_request_std_resource(up);
+       if (ret == 0 && port->type == PORT_RSA) {
+               ret = serial8250_request_rsa_resource(up);
+               if (ret < 0)
+                       serial8250_release_std_resource(up);
+       }
+
+       return ret;
+}
+
+static void serial8250_config_port(struct uart_port *port, int flags)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       int probeflags = PROBE_ANY;
+       int ret;
+
+       if (port->type == PORT_8250_CIR)
+               return;
+
+       /*
+        * Find the region that we can probe for.  This in turn
+        * tells us whether we can probe for the type of port.
+        */
+       ret = serial8250_request_std_resource(up);
+       if (ret < 0)
+               return;
+
+       ret = serial8250_request_rsa_resource(up);
+       if (ret < 0)
+               probeflags &= ~PROBE_RSA;
+
+       if (port->iotype != up->cur_iotype)
+               set_io_from_upio(port);
+
+       if (flags & UART_CONFIG_TYPE)
+               autoconfig(up, probeflags);
+
+       /* if access method is AU, it is a 16550 with a quirk */
+       if (port->type == PORT_16550A && port->iotype == UPIO_AU)
+               up->bugs |= UART_BUG_NOMSR;
+
+       if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
+               autoconfig_irq(up);
+
+       if (port->type != PORT_RSA && probeflags & PROBE_RSA)
+               serial8250_release_rsa_resource(up);
+       if (port->type == PORT_UNKNOWN)
+               serial8250_release_std_resource(up);
+
+       /* FIXME: probably not the best place for this */
+       if ((port->type == PORT_XR17V35X) ||
+          (port->type == PORT_XR17D15X))
+               port->handle_irq = exar_handle_irq;
+}
+
+static int
+serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+       if (ser->irq >= nr_irqs || ser->irq < 0 ||
+           ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
+           ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
+           ser->type == PORT_STARTECH)
+               return -EINVAL;
+       return 0;
+}
+
+static const char *
+serial8250_type(struct uart_port *port)
+{
+       int type = port->type;
+
+       if (type >= ARRAY_SIZE(uart_config))
+               type = 0;
+       return uart_config[type].name;
+}
+
+static struct uart_ops serial8250_pops = {
+       .tx_empty       = serial8250_tx_empty,
+       .set_mctrl      = serial8250_set_mctrl,
+       .get_mctrl      = serial8250_get_mctrl,
+       .stop_tx        = serial8250_stop_tx,
+       .start_tx       = serial8250_start_tx,
+       .stop_rx        = serial8250_stop_rx,
+       .enable_ms      = serial8250_enable_ms,
+       .break_ctl      = serial8250_break_ctl,
+       .startup        = serial8250_startup,
+       .shutdown       = serial8250_shutdown,
+       .set_termios    = serial8250_set_termios,
+       .set_ldisc      = serial8250_set_ldisc,
+       .pm             = serial8250_pm,
+       .type           = serial8250_type,
+       .release_port   = serial8250_release_port,
+       .request_port   = serial8250_request_port,
+       .config_port    = serial8250_config_port,
+       .verify_port    = serial8250_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+       .poll_get_char = serial8250_get_poll_char,
+       .poll_put_char = serial8250_put_poll_char,
+#endif
+};
+
+static struct uart_8250_port serial8250_ports[UART_NR];
+
+static void (*serial8250_isa_config)(int port, struct uart_port *up,
+       unsigned short *capabilities);
+
+void serial8250_set_isa_configurator(
+       void (*v)(int port, struct uart_port *up, unsigned short *capabilities))
+{
+       serial8250_isa_config = v;
+}
+EXPORT_SYMBOL(serial8250_set_isa_configurator);
+
+static void __init serial8250_isa_init_ports(void)
+{
+       struct uart_8250_port *up;
+       static int first = 1;
+       int i, irqflag = 0;
+
+       if (!first)
+               return;
+       first = 0;
+
+       if (nr_uarts > UART_NR)
+               nr_uarts = UART_NR;
+
+       for (i = 0; i < nr_uarts; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+               struct uart_port *port = &up->port;
+
+               port->line = i;
+               spin_lock_init(&port->lock);
+
+               init_timer(&up->timer);
+               up->timer.function = serial8250_timeout;
+               up->cur_iotype = 0xFF;
+
+               /*
+                * ALPHA_KLUDGE_MCR needs to be killed.
+                */
+               up->mcr_mask = ~ALPHA_KLUDGE_MCR;
+               up->mcr_force = ALPHA_KLUDGE_MCR;
+
+               port->ops = &serial8250_pops;
+       }
+
+       if (share_irqs)
+               irqflag = IRQF_SHARED;
+
+       for (i = 0, up = serial8250_ports;
+            i < ARRAY_SIZE(old_serial_port) && i < nr_uarts;
+            i++, up++) {
+               struct uart_port *port = &up->port;
+
+               port->iobase   = old_serial_port[i].port;
+               port->irq      = irq_canonicalize(old_serial_port[i].irq);
+               port->irqflags = old_serial_port[i].irqflags;
+               port->uartclk  = old_serial_port[i].baud_base * 16;
+               port->flags    = old_serial_port[i].flags;
+               port->hub6     = old_serial_port[i].hub6;
+               port->membase  = old_serial_port[i].iomem_base;
+               port->iotype   = old_serial_port[i].io_type;
+               port->regshift = old_serial_port[i].iomem_reg_shift;
+               set_io_from_upio(port);
+               port->irqflags |= irqflag;
+               if (serial8250_isa_config != NULL)
+                       serial8250_isa_config(i, &up->port, &up->capabilities);
+
+       }
+}
+
+static void
+serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
+{
+       up->port.type = type;
+       if (!up->port.fifosize)
+               up->port.fifosize = uart_config[type].fifo_size;
+       if (!up->tx_loadsz)
+               up->tx_loadsz = uart_config[type].tx_loadsz;
+       if (!up->capabilities)
+               up->capabilities = uart_config[type].flags;
+}
+
+static void __init
+serial8250_register_ports(struct uart_driver *drv, struct device *dev)
+{
+       int i;
+
+       for (i = 0; i < nr_uarts; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+
+               if (up->port.dev)
+                       continue;
+
+               up->port.dev = dev;
+
+               if (up->port.flags & UPF_FIXED_TYPE)
+                       serial8250_init_fixed_type_port(up, up->port.type);
+
+               uart_add_one_port(drv, &up->port);
+       }
+}
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+
+static void serial8250_console_putchar(struct uart_port *port, int ch)
+{
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+
+       wait_for_xmitr(up, UART_LSR_THRE);
+       serial_port_out(port, UART_TX, ch);
+}
+
+/*
+ *     Print a string to the serial port trying not to disturb
+ *     any possible real use of the port...
+ *
+ *     The console_lock must be held when we get here.
+ */
+static void
+serial8250_console_write(struct console *co, const char *s, unsigned int count)
+{
+       struct uart_8250_port *up = &serial8250_ports[co->index];
+       struct uart_port *port = &up->port;
+       unsigned long flags;
+       unsigned int ier;
+       int locked = 1;
+
+       touch_nmi_watchdog();
+
+       local_irq_save(flags);
+       if (port->sysrq) {
+               /* serial8250_handle_irq() already took the lock */
+               locked = 0;
+       } else if (oops_in_progress) {
+               locked = spin_trylock(&port->lock);
+       } else
+               spin_lock(&port->lock);
+
+       /*
+        *      First save the IER, then disable the interrupts
+        */
+       ier = serial_port_in(port, UART_IER);
+
+       if (up->capabilities & UART_CAP_UUE)
+               serial_port_out(port, UART_IER, UART_IER_UUE);
+       else
+               serial_port_out(port, UART_IER, 0);
+
+       uart_console_write(port, s, count, serial8250_console_putchar);
+
+       /*
+        *      Finally, wait for transmitter to become empty
+        *      and restore the IER
+        */
+       wait_for_xmitr(up, BOTH_EMPTY);
+       serial_port_out(port, UART_IER, ier);
+
+       /*
+        *      The receive handling will happen properly because the
+        *      receive ready bit will still be set; it is not cleared
+        *      on read.  However, modem status handling will not, so we
+        *      must call it ourselves if we saved anything in the MSR
+        *      flags while processing with interrupts off.
+        */
+       if (up->msr_saved_flags)
+               serial8250_modem_status(up);
+
+       if (locked)
+               spin_unlock(&port->lock);
+       local_irq_restore(flags);
+}
+
+static int __init serial8250_console_setup(struct console *co, char *options)
+{
+       struct uart_port *port;
+       int baud = 9600;
+       int bits = 8;
+       int parity = 'n';
+       int flow = 'n';
+
+       /*
+        * Check whether an invalid uart number has been specified, and
+        * if so, fall back to the first port.
+        */
+       if (co->index >= nr_uarts)
+               co->index = 0;
+       port = &serial8250_ports[co->index].port;
+       if (!port->iobase && !port->membase)
+               return -ENODEV;
+
+       if (options)
+               uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+       return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static int serial8250_console_early_setup(void)
+{
+       return serial8250_find_port_for_earlycon();
+}
+
+static struct console serial8250_console = {
+       .name           = "ttyS",
+       .write          = serial8250_console_write,
+       .device         = uart_console_device,
+       .setup          = serial8250_console_setup,
+       .early_setup    = serial8250_console_early_setup,
+       .flags          = CON_PRINTBUFFER | CON_ANYTIME,
+       .index          = -1,
+       .data           = &serial8250_reg,
+};
+
+static int __init serial8250_console_init(void)
+{
+       serial8250_isa_init_ports();
+       register_console(&serial8250_console);
+       return 0;
+}
+console_initcall(serial8250_console_init);
+
+int serial8250_find_port(struct uart_port *p)
+{
+       int line;
+       struct uart_port *port;
+
+       for (line = 0; line < nr_uarts; line++) {
+               port = &serial8250_ports[line].port;
+               if (uart_match_port(p, port))
+                       return line;
+       }
+       return -ENODEV;
+}
+
+#define SERIAL8250_CONSOLE     &serial8250_console
+#else
+#define SERIAL8250_CONSOLE     NULL
+#endif
+
+static struct uart_driver serial8250_reg = {
+       .owner                  = THIS_MODULE,
+       .driver_name            = "serial",
+       .dev_name               = "ttyS",
+       .major                  = TTY_MAJOR,
+       .minor                  = 64,
+       .cons                   = SERIAL8250_CONSOLE,
+};
+
+/*
+ * early_serial_setup - early registration for 8250 ports
+ *
+ * Set up an 8250 port structure prior to console initialisation.  Using
+ * it after console initialisation will cause undefined behaviour.
+ */
+int __init early_serial_setup(struct uart_port *port)
+{
+       struct uart_port *p;
+
+       if (port->line >= ARRAY_SIZE(serial8250_ports))
+               return -ENODEV;
+
+       serial8250_isa_init_ports();
+       p = &serial8250_ports[port->line].port;
+       p->iobase       = port->iobase;
+       p->membase      = port->membase;
+       p->irq          = port->irq;
+       p->irqflags     = port->irqflags;
+       p->uartclk      = port->uartclk;
+       p->fifosize     = port->fifosize;
+       p->regshift     = port->regshift;
+       p->iotype       = port->iotype;
+       p->flags        = port->flags;
+       p->mapbase      = port->mapbase;
+       p->private_data = port->private_data;
+       p->type         = port->type;
+       p->line         = port->line;
+
+       set_io_from_upio(p);
+       if (port->serial_in)
+               p->serial_in = port->serial_in;
+       if (port->serial_out)
+               p->serial_out = port->serial_out;
+       if (port->handle_irq)
+               p->handle_irq = port->handle_irq;
+       else
+               p->handle_irq = serial8250_default_handle_irq;
+
+       return 0;
+}
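
As a hedged illustration of the early_serial_setup() path documented above (the I/O address, IRQ, clock and the helper name below are all invented for the example, not taken from this patch), architecture setup code might pre-register its console UART roughly like this:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>

/* Hypothetical legacy COM1-style resources, for illustration only. */
static struct uart_port example_boot_uart = {
	.iobase   = 0x3f8,
	.irq      = 4,
	.uartclk  = 1843200,
	.iotype   = UPIO_PORT,
	.flags    = UPF_BOOT_AUTOCONF,
	.line     = 0,
};

static void __init example_register_boot_uart(void)
{
	/* Must run before console initialisation, as noted above. */
	if (early_serial_setup(&example_boot_uart) < 0)
		pr_warn("example: early 8250 registration failed\n");
}
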
+
+/**
+ *     serial8250_suspend_port - suspend one serial port
+ *     @line:  serial line number
+ *
+ *     Suspend one serial port.
+ */
+void serial8250_suspend_port(int line)
+{
+       uart_suspend_port(&serial8250_reg, &serial8250_ports[line].port);
+}
+
+/**
+ *     serial8250_resume_port - resume one serial port
+ *     @line:  serial line number
+ *
+ *     Resume one serial port.
+ */
+void serial8250_resume_port(int line)
+{
+       struct uart_8250_port *up = &serial8250_ports[line];
+       struct uart_port *port = &up->port;
+
+       if (up->capabilities & UART_NATSEMI) {
+               /* Ensure it's still in high speed mode */
+               serial_port_out(port, UART_LCR, 0xE0);
+
+               ns16550a_goto_highspeed(up);
+
+               serial_port_out(port, UART_LCR, 0);
+               port->uartclk = 921600*16;
+       }
+       uart_resume_port(&serial8250_reg, port);
+}
+
+/*
+ * Register a set of serial devices attached to a platform device.  The
+ * list is terminated with a zero flags entry, which means we expect
+ * all entries to have at least UPF_BOOT_AUTOCONF set.
+ */
+static int serial8250_probe(struct platform_device *dev)
+{
+       struct plat_serial8250_port *p = dev->dev.platform_data;
+       struct uart_8250_port uart;
+       int ret, i, irqflag = 0;
+
+       memset(&uart, 0, sizeof(uart));
+
+       if (share_irqs)
+               irqflag = IRQF_SHARED;
+
+       for (i = 0; p && p->flags != 0; p++, i++) {
+               uart.port.iobase        = p->iobase;
+               uart.port.membase       = p->membase;
+               uart.port.irq           = p->irq;
+               uart.port.irqflags      = p->irqflags;
+               uart.port.uartclk       = p->uartclk;
+               uart.port.regshift      = p->regshift;
+               uart.port.iotype        = p->iotype;
+               uart.port.flags         = p->flags;
+               uart.port.mapbase       = p->mapbase;
+               uart.port.hub6          = p->hub6;
+               uart.port.private_data  = p->private_data;
+               uart.port.type          = p->type;
+               uart.port.serial_in     = p->serial_in;
+               uart.port.serial_out    = p->serial_out;
+               uart.port.handle_irq    = p->handle_irq;
+               uart.port.handle_break  = p->handle_break;
+               uart.port.set_termios   = p->set_termios;
+               uart.port.pm            = p->pm;
+               uart.port.dev           = &dev->dev;
+               uart.port.irqflags      |= irqflag;
+               ret = serial8250_register_8250_port(&uart);
+               if (ret < 0) {
+                       dev_err(&dev->dev, "unable to register port at index %d "
+                               "(IO%lx MEM%llx IRQ%d): %d\n", i,
+                               p->iobase, (unsigned long long)p->mapbase,
+                               p->irq, ret);
+               }
+       }
+       return 0;
+}
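
The comment above serial8250_probe() describes the zero-terminated platform-data convention; as a rough, hedged sketch (every resource value and identifier below except the "serial8250" device name and PLAT8250_DEV_PLATFORM is invented), a board file could hand the driver a list like this:

#include <linux/platform_device.h>
#include <linux/serial_8250.h>

/* Invented resources; the empty entry (flags == 0) terminates the list. */
static struct plat_serial8250_port example_board_ports[] = {
	{
		.iobase  = 0x3f8,
		.irq     = 4,
		.uartclk = 1843200,
		.iotype  = UPIO_PORT,
		.flags   = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
	},
	{ },
};

static struct platform_device example_board_serial_device = {
	.name = "serial8250",
	.id   = PLAT8250_DEV_PLATFORM,
	.dev  = {
		.platform_data = example_board_ports,
	},
};

Registering example_board_serial_device with platform_device_register() would then reach the loop above, adding one port per non-empty entry.
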
+
+/*
+ * Remove serial ports registered against a platform device.
+ */
+static int serial8250_remove(struct platform_device *dev)
+{
+       int i;
+
+       for (i = 0; i < nr_uarts; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+
+               if (up->port.dev == &dev->dev)
+                       serial8250_unregister_port(i);
+       }
+       return 0;
+}
+
+static int serial8250_suspend(struct platform_device *dev, pm_message_t state)
+{
+       int i;
+
+       for (i = 0; i < UART_NR; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+
+               if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
+                       uart_suspend_port(&serial8250_reg, &up->port);
+       }
+
+       return 0;
+}
+
+static int serial8250_resume(struct platform_device *dev)
+{
+       int i;
+
+       for (i = 0; i < UART_NR; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+
+               if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
+                       serial8250_resume_port(i);
+       }
+
+       return 0;
+}
+
+static struct platform_driver serial8250_isa_driver = {
+       .probe          = serial8250_probe,
+       .remove         = serial8250_remove,
+       .suspend        = serial8250_suspend,
+       .resume         = serial8250_resume,
+       .driver         = {
+               .name   = "serial8250",
+               .owner  = THIS_MODULE,
+       },
+};
+
+/*
+ * This "device" covers _all_ ISA 8250-compatible serial devices listed
+ * in the table in include/asm/serial.h
+ */
+static struct platform_device *serial8250_isa_devs;
+
+/*
+ * serial8250_register_8250_port and serial8250_unregister_port allow for
+ * 16x50 serial ports to be configured at run-time, to support PCMCIA
+ * modems and PCI multiport cards.
+ */
+static DEFINE_MUTEX(serial_mutex);
+
+static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *port)
+{
+       int i;
+
+       /*
+        * First, find a port entry which matches.
+        */
+       for (i = 0; i < nr_uarts; i++)
+               if (uart_match_port(&serial8250_ports[i].port, port))
+                       return &serial8250_ports[i];
+
+       /*
+        * We didn't find a matching entry, so look for the first
+        * free entry.  We look for one which hasn't been previously
+        * used (indicated by zero iobase).
+        */
+       for (i = 0; i < nr_uarts; i++)
+               if (serial8250_ports[i].port.type == PORT_UNKNOWN &&
+                   serial8250_ports[i].port.iobase == 0)
+                       return &serial8250_ports[i];
+
+       /*
+        * That also failed.  Last resort is to find any entry which
+        * doesn't have a real port associated with it.
+        */
+       for (i = 0; i < nr_uarts; i++)
+               if (serial8250_ports[i].port.type == PORT_UNKNOWN)
+                       return &serial8250_ports[i];
+
+       return NULL;
+}
+
+/**
+ *     serial8250_register_8250_port - register a serial port
+ *     @up: serial port template
+ *
+ *     Configure the serial port specified by the request. If the
+ *     port exists and is in use, it is hung up and unregistered
+ *     first.
+ *
+ *     The port is then probed and, if necessary, the IRQ is autodetected.
+ *     If this fails, an error is returned.
+ *
+ *     On success the port is ready to use and the line number is returned.
+ */
+int serial8250_register_8250_port(struct uart_8250_port *up)
+{
+       struct uart_8250_port *uart;
+       int ret = -ENOSPC;
+
+       if (up->port.uartclk == 0)
+               return -EINVAL;
+
+       mutex_lock(&serial_mutex);
+
+       uart = serial8250_find_match_or_unused(&up->port);
+       if (uart && uart->port.type != PORT_8250_CIR) {
+               if (uart->port.dev)
+                       uart_remove_one_port(&serial8250_reg, &uart->port);
+
+               uart->port.iobase       = up->port.iobase;
+               uart->port.membase      = up->port.membase;
+               uart->port.irq          = up->port.irq;
+               uart->port.irqflags     = up->port.irqflags;
+               uart->port.uartclk      = up->port.uartclk;
+               uart->port.fifosize     = up->port.fifosize;
+               uart->port.regshift     = up->port.regshift;
+               uart->port.iotype       = up->port.iotype;
+               uart->port.flags        = up->port.flags | UPF_BOOT_AUTOCONF;
+               uart->bugs              = up->bugs;
+               uart->port.mapbase      = up->port.mapbase;
+               uart->port.private_data = up->port.private_data;
+               uart->port.fifosize     = up->port.fifosize;
+               uart->tx_loadsz         = up->tx_loadsz;
+               uart->capabilities      = up->capabilities;
+
+               if (up->port.dev)
+                       uart->port.dev = up->port.dev;
+
+               if (up->port.flags & UPF_FIXED_TYPE)
+                       serial8250_init_fixed_type_port(uart, up->port.type);
+
+               set_io_from_upio(&uart->port);
+               /* Possibly override default I/O functions.  */
+               if (up->port.serial_in)
+                       uart->port.serial_in = up->port.serial_in;
+               if (up->port.serial_out)
+                       uart->port.serial_out = up->port.serial_out;
+               if (up->port.handle_irq)
+                       uart->port.handle_irq = up->port.handle_irq;
+               /*  Possibly override set_termios call */
+               if (up->port.set_termios)
+                       uart->port.set_termios = up->port.set_termios;
+               if (up->port.pm)
+                       uart->port.pm = up->port.pm;
+               if (up->port.handle_break)
+                       uart->port.handle_break = up->port.handle_break;
+               if (up->dl_read)
+                       uart->dl_read = up->dl_read;
+               if (up->dl_write)
+                       uart->dl_write = up->dl_write;
+               if (up->dma)
+                       uart->dma = up->dma;
+
+               if (serial8250_isa_config != NULL)
+                       serial8250_isa_config(0, &uart->port,
+                                       &uart->capabilities);
+
+               ret = uart_add_one_port(&serial8250_reg, &uart->port);
+               if (ret == 0)
+                       ret = uart->port.line;
+       }
+       mutex_unlock(&serial_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(serial8250_register_8250_port);
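
For the runtime registration path documented above, a minimal, hedged sketch of a caller might look as follows; the function name and all resource values are invented, and only the uart_8250_port fields and the register/unregister calls come from this file and its header:

#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/string.h>

/* Returns the allocated line number, or a negative errno. */
static int example_attach_8250(void __iomem *regs, int irq, unsigned int clk)
{
	struct uart_8250_port uart;

	memset(&uart, 0, sizeof(uart));
	uart.port.membase = regs;
	uart.port.irq     = irq;
	uart.port.uartclk = clk;	/* must be non-zero, see above */
	uart.port.iotype  = UPIO_MEM;
	uart.port.flags   = UPF_SHARE_IRQ | UPF_FIXED_TYPE;
	uart.port.type    = PORT_16550A;

	return serial8250_register_8250_port(&uart);
}

The returned line number is what such a driver would later pass to serial8250_unregister_port() on teardown.
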
+
+/**
+ *     serial8250_unregister_port - remove a 16x50 serial port at runtime
+ *     @line: serial line number
+ *
+ *     Remove one serial port.  This may not be called from interrupt
+ *     context.  We hand the port back to our control.
+ */
+void serial8250_unregister_port(int line)
+{
+       struct uart_8250_port *uart = &serial8250_ports[line];
+
+       mutex_lock(&serial_mutex);
+       uart_remove_one_port(&serial8250_reg, &uart->port);
+       if (serial8250_isa_devs) {
+               uart->port.flags &= ~UPF_BOOT_AUTOCONF;
+               uart->port.type = PORT_UNKNOWN;
+               uart->port.dev = &serial8250_isa_devs->dev;
+               uart->capabilities = uart_config[uart->port.type].flags;
+               uart_add_one_port(&serial8250_reg, &uart->port);
+       } else {
+               uart->port.dev = NULL;
+       }
+       mutex_unlock(&serial_mutex);
+}
+EXPORT_SYMBOL(serial8250_unregister_port);
+
+static int __init serial8250_init(void)
+{
+       int ret;
+
+       serial8250_isa_init_ports();
+
+       printk(KERN_INFO "Serial: 8250/16550 driver, "
+               "%d ports, IRQ sharing %sabled\n", nr_uarts,
+               share_irqs ? "en" : "dis");
+
+#ifdef CONFIG_SPARC
+       ret = sunserial_register_minors(&serial8250_reg, UART_NR);
+#else
+       serial8250_reg.nr = UART_NR;
+       ret = uart_register_driver(&serial8250_reg);
+#endif
+       if (ret)
+               goto out;
+
+       ret = serial8250_pnp_init();
+       if (ret)
+               goto unreg_uart_drv;
+
+       serial8250_isa_devs = platform_device_alloc("serial8250",
+                                                   PLAT8250_DEV_LEGACY);
+       if (!serial8250_isa_devs) {
+               ret = -ENOMEM;
+               goto unreg_pnp;
+       }
+
+       ret = platform_device_add(serial8250_isa_devs);
+       if (ret)
+               goto put_dev;
+
+       serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);
+
+       ret = platform_driver_register(&serial8250_isa_driver);
+       if (ret == 0)
+               goto out;
+
+       platform_device_del(serial8250_isa_devs);
+put_dev:
+       platform_device_put(serial8250_isa_devs);
+unreg_pnp:
+       serial8250_pnp_exit();
+unreg_uart_drv:
+#ifdef CONFIG_SPARC
+       sunserial_unregister_minors(&serial8250_reg, UART_NR);
+#else
+       uart_unregister_driver(&serial8250_reg);
+#endif
+out:
+       return ret;
+}
+
+static void __exit serial8250_exit(void)
+{
+       struct platform_device *isa_dev = serial8250_isa_devs;
+
+       /*
+        * This tells serial8250_unregister_port() not to re-register
+        * the ports (thereby making serial8250_isa_driver permanently
+        * in use).
+        */
+       serial8250_isa_devs = NULL;
+
+       platform_driver_unregister(&serial8250_isa_driver);
+       platform_device_unregister(isa_dev);
+
+       serial8250_pnp_exit();
+
+#ifdef CONFIG_SPARC
+       sunserial_unregister_minors(&serial8250_reg, UART_NR);
+#else
+       uart_unregister_driver(&serial8250_reg);
+#endif
+}
+
+module_init(serial8250_init);
+module_exit(serial8250_exit);
+
+EXPORT_SYMBOL(serial8250_suspend_port);
+EXPORT_SYMBOL(serial8250_resume_port);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic 8250/16x50 serial driver");
+
+module_param(share_irqs, uint, 0644);
+MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
+       " (unsafe)");
+
+module_param(nr_uarts, uint, 0644);
+MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
+
+module_param(skip_txen_test, uint, 0644);
+MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
+
+#ifdef CONFIG_SERIAL_8250_RSA
+module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
+MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
+#endif
+MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
+
+#ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS
+#ifndef MODULE
+/* This module was renamed to 8250_core in 3.7 and has now been renamed back
+ * to 8250.  Keep the interim "8250_core" name working as well for the module
+ * options so we don't break people.  We need to keep the parameter names
+ * identical, but the convenience macros refuse to let us do that, failing
+ * the build with redefinition errors for the global variables.  So we stick
+ * them inside a dummy function to avoid those conflicts.  The options still
+ * get parsed, and the redefined MODULE_PARAM_PREFIX keeps the "8250_core."
+ * syntax alive.
+ *
+ * This is hacky.  I'm sorry.
+ */
+static void __used s8250_options(void)
+{
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "8250_core."
+
+       module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644);
+       module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644);
+       module_param_cb(skip_txen_test, &param_ops_uint, &skip_txen_test, 0644);
+#ifdef CONFIG_SERIAL_8250_RSA
+       __module_param_call(MODULE_PARAM_PREFIX, probe_rsa,
+               &param_array_ops, .arr = &__param_arr_probe_rsa,
+               0444, -1);
+#endif
+}
+#else
+MODULE_ALIAS("8250_core");
+#endif
+#endif
index aa76825229dca32d612781f877b1d1167add71af..26e3a97ab157ed16d23825c1a62bec4d53cd81b9 100644 (file)
@@ -1554,6 +1554,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
 #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
 #define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
 #define PCI_VENDOR_ID_WCH              0x4348
+#define PCI_DEVICE_ID_WCH_CH352_2S     0x3253
 #define PCI_DEVICE_ID_WCH_CH353_4S     0x3453
 #define PCI_DEVICE_ID_WCH_CH353_2S1PF  0x5046
 #define PCI_DEVICE_ID_WCH_CH353_2S1P   0x7053
@@ -2172,6 +2173,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = pci_wch_ch353_setup,
        },
+       /* WCH CH352 2S card (16550 clone) */
+       {
+               .vendor         = PCI_VENDOR_ID_WCH,
+               .device         = PCI_DEVICE_ID_WCH_CH352_2S,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_wch_ch353_setup,
+       },
        /*
         * ASIX devices with FIFO bug
         */
@@ -4870,6 +4879,10 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_b0_bt_2_115200 },
 
+       {       PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S,
+               PCI_ANY_ID, PCI_ANY_ID,
+               0, 0, pbn_b0_bt_2_115200 },
+
        /*
         * Commtech, Inc. Fastcom adapters
         */
index 2ef9537bcb2cda6a985d27f9dda8c3385187fe3a..80fe91e64a527da06e82d9cf237be64a26faf662 100644 (file)
@@ -33,6 +33,23 @@ config SERIAL_8250
          Most people will say Y or M here, so that they can use serial mice,
          modems and similar devices connecting to the standard serial ports.
 
+config SERIAL_8250_DEPRECATED_OPTIONS
+       bool "Support 8250_core.* kernel options (DEPRECATED)"
+       depends on SERIAL_8250
+       default y
+       ---help---
+         In 3.7 we renamed 8250 to 8250_core by mistake, so now we have to
+         accept kernel parameters in both forms like 8250_core.nr_uarts=4 and
+         8250.nr_uarts=4. We have now renamed the module back to 8250, but if
+         anybody noticed the change in 3.7 and adapted their userspace we
+         still have to keep the 8250_core.* options around until they revert
+         those changes.
+
+         If 8250 is built as a module, this option adds an 8250_core alias
+         instead.
+
+         If you did not notice the rename and/or your userspace predates 3.7,
+         it is safe (and recommended) to say N here.
+
 config SERIAL_8250_PNP
        bool "8250/16550 PNP device support" if EXPERT
        depends on SERIAL_8250 && PNP
index a23838a4d5353773c1ec8d15adbecec3ceee123c..36d68d0543072c21f48807d3b932ed426e843685 100644 (file)
@@ -2,10 +2,10 @@
 # Makefile for the 8250 serial device drivers.
 #
 
-obj-$(CONFIG_SERIAL_8250)              += 8250_core.o
-8250_core-y                            := 8250.o
-8250_core-$(CONFIG_SERIAL_8250_PNP)    += 8250_pnp.o
-8250_core-$(CONFIG_SERIAL_8250_DMA)    += 8250_dma.o
+obj-$(CONFIG_SERIAL_8250)              += 8250.o
+8250-y                                 := 8250_core.o
+8250-$(CONFIG_SERIAL_8250_PNP)         += 8250_pnp.o
+8250-$(CONFIG_SERIAL_8250_DMA)         += 8250_dma.o
 obj-$(CONFIG_SERIAL_8250_GSC)          += 8250_gsc.o
 obj-$(CONFIG_SERIAL_8250_PCI)          += 8250_pci.o
 obj-$(CONFIG_SERIAL_8250_HP300)                += 8250_hp300.o
index d4a7c241b751279d3422f6d024b45cbb1a04a2de..3467462869ce2dce88ca8832002d7f294d5eaae6 100644 (file)
@@ -158,7 +158,7 @@ struct atmel_uart_port {
 };
 
 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
-static unsigned long atmel_ports_in_use;
+static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
 
 #ifdef SUPPORT_SYSRQ
 static struct console atmel_console;
@@ -1769,15 +1769,14 @@ static int atmel_serial_probe(struct platform_device *pdev)
        if (ret < 0)
                /* port id not found in platform data nor device-tree aliases:
                 * auto-enumerate it */
-               ret = find_first_zero_bit(&atmel_ports_in_use,
-                               sizeof(atmel_ports_in_use));
+               ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
 
-       if (ret > ATMEL_MAX_UART) {
+       if (ret >= ATMEL_MAX_UART) {
                ret = -ENODEV;
                goto err;
        }
 
-       if (test_and_set_bit(ret, &atmel_ports_in_use)) {
+       if (test_and_set_bit(ret, atmel_ports_in_use)) {
                /* port already in use */
                ret = -EBUSY;
                goto err;
@@ -1857,7 +1856,7 @@ static int atmel_serial_remove(struct platform_device *pdev)
 
        /* "port" is allocated statically, so we shouldn't free it */
 
-       clear_bit(port->line, &atmel_ports_in_use);
+       clear_bit(port->line, atmel_ports_in_use);
 
        clk_put(atmel_port->clk);
 
index ba451c7209fc6b9bbaaf47b9e2d2458f202feaf8..f36bbba1ac8b9070cc7cefafb8a33265a7872253 100644 (file)
@@ -578,6 +578,8 @@ static int xuartps_startup(struct uart_port *port)
        /* Receive Timeout register is enabled with value of 10 */
        xuartps_writel(10, XUARTPS_RXTOUT_OFFSET);
 
+       /* Clear out any pending interrupts before enabling them */
+       xuartps_writel(xuartps_readl(XUARTPS_ISR_OFFSET), XUARTPS_ISR_OFFSET);
 
        /* Set the Interrupt Registers with desired interrupts */
        xuartps_writel(XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_PARITY |
index e4ca345873c3279761f8dcf6cee3f06053205088..d7799deacb21a66805d8e4308bcc199b64894fbf 100644 (file)
@@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll)
 static struct vcs_poll_data *
 vcs_poll_data_get(struct file *file)
 {
-       struct vcs_poll_data *poll = file->private_data;
+       struct vcs_poll_data *poll = file->private_data, *kill = NULL;
 
        if (poll)
                return poll;
@@ -122,10 +122,12 @@ vcs_poll_data_get(struct file *file)
                file->private_data = poll;
        } else {
                /* someone else raced ahead of us */
-               vcs_poll_data_free(poll);
+               kill = poll;
                poll = file->private_data;
        }
        spin_unlock(&file->f_lock);
+       if (kill)
+               vcs_poll_data_free(kill);
 
        return poll;
 }
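The change above keeps the losing allocation and frees it only after the spinlock is dropped, since vcs_poll_data_free() unregisters a notifier and may sleep. A generic sketch of this "defer cleanup until after the lock" pattern (all names hypothetical):

	spin_lock(&lock);
	if (!shared) {
		shared = new_obj;	/* we won the race, publish our object */
		new_obj = NULL;
	}
	spin_unlock(&lock);

	if (new_obj)
		destroy_obj(new_obj);	/* possibly sleeping cleanup, outside the lock */
	return shared;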
index 99b34a30354f4a2a3e06deef32addaefb3e46e61..f9ec44cbb82fb1ff6fa8e620367de8a9daec2c70 100644 (file)
@@ -2412,6 +2412,14 @@ int usb_hcd_is_primary_hcd(struct usb_hcd *hcd)
 }
 EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd);
 
+int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1)
+{
+       if (!hcd->driver->find_raw_port_number)
+               return port1;
+
+       return hcd->driver->find_raw_port_number(hcd, port1);
+}
+
 static int usb_hcd_request_irqs(struct usb_hcd *hcd,
                unsigned int irqnum, unsigned long irqflags)
 {
index b6f4bad3f756eefb173097767ffde6970e06220e..255c14464bf2ea7a30dffab6fa1770c7b030c0aa 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/acpi.h>
 #include <linux/pci.h>
+#include <linux/usb/hcd.h>
 #include <acpi/acpi_bus.h>
 
 #include "usb.h"
@@ -188,8 +189,13 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
                 * connected to.
                 */
                if (!udev->parent) {
-                       *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev),
+                       struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+                       int raw_port_num;
+
+                       raw_port_num = usb_hcd_find_raw_port_number(hcd,
                                port_num);
+                       *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev),
+                               raw_port_num);
                        if (!*handle)
                                return -ENODEV;
                } else {
index 5a0c541daf893a33a428bedb8d68ec1fa72d4234..c7525b1cad74a0f92dc1057621dca509f80d671c 100644 (file)
@@ -145,6 +145,7 @@ config USB_LPC32XX
        tristate "LPC32XX USB Peripheral Controller"
        depends on ARCH_LPC32XX
        select USB_ISP1301
+       select USB_OTG_UTILS
        help
           This option selects the USB device controller in the LPC32xx SoC.
 
index b476daf49f6f3c6cf226b920f5c8ff01ca5d23aa..010f686d8881fc9796fd3bbee1c1f5c3aa71e878 100644 (file)
@@ -1214,6 +1214,7 @@ itd_urb_transaction (
 
                memset (itd, 0, sizeof *itd);
                itd->itd_dma = itd_dma;
+               itd->frame = 9999;              /* an invalid value */
                list_add (&itd->itd_list, &sched->td_list);
        }
        spin_unlock_irqrestore (&ehci->lock, flags);
@@ -1915,6 +1916,7 @@ sitd_urb_transaction (
 
                memset (sitd, 0, sizeof *sitd);
                sitd->sitd_dma = sitd_dma;
+               sitd->frame = 9999;             /* an invalid value */
                list_add (&sitd->sitd_list, &iso_sched->td_list);
        }
 
index 35616ffbe3ae7435155bf29196d295f35a719b71..6dc238c592bc9aef5e3d8a564240b085e401cf6b 100644 (file)
@@ -1022,44 +1022,24 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
  * is attached to (or the roothub port its ancestor hub is attached to).  All we
  * know is the index of that port under either the USB 2.0 or the USB 3.0
  * roothub, but that doesn't give us the real index into the HW port status
- * registers.  Scan through the xHCI roothub port array, looking for the Nth
- * entry of the correct port speed.  Return the port number of that entry.
+ * registers. Call xhci_find_raw_port_number() to get the real index.
  */
 static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
                struct usb_device *udev)
 {
        struct usb_device *top_dev;
-       unsigned int num_similar_speed_ports;
-       unsigned int faked_port_num;
-       int i;
+       struct usb_hcd *hcd;
+
+       if (udev->speed == USB_SPEED_SUPER)
+               hcd = xhci->shared_hcd;
+       else
+               hcd = xhci->main_hcd;
 
        for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
                        top_dev = top_dev->parent)
                /* Found device below root hub */;
-       faked_port_num = top_dev->portnum;
-       for (i = 0, num_similar_speed_ports = 0;
-                       i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
-               u8 port_speed = xhci->port_array[i];
-
-               /*
-                * Skip ports that don't have known speeds, or have duplicate
-                * Extended Capabilities port speed entries.
-                */
-               if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
-                       continue;
 
-               /*
-                * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
-                * 1.1 ports are under the USB 2.0 hub.  If the port speed
-                * matches the device speed, it's a similar speed port.
-                */
-               if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
-                       num_similar_speed_ports++;
-               if (num_similar_speed_ports == faked_port_num)
-                       /* Roothub ports are numbered from 1 to N */
-                       return i+1;
-       }
-       return 0;
+       return  xhci_find_raw_port_number(hcd, top_dev->portnum);
 }
 
 /* Setup an xHCI virtual device for a Set Address command */
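A hypothetical example of the mapping described above: on a controller whose port register blocks are laid out as four USB 2.0 ports followed by two USB 3.0 ports, a SuperSpeed device on the first USB 3.0 roothub port has portnum 1 under the shared (USB 3.0) hcd, but its registers are the fifth block, so xhci_find_raw_port_number() returns 5.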
index af259e0ec172aa4a5664ca15acb2adf8d90bf02c..1a30c380043ce258aa660e67edb38210707fffd6 100644 (file)
@@ -313,6 +313,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
        .set_usb2_hw_lpm =      xhci_set_usb2_hardware_lpm,
        .enable_usb3_lpm_timeout =      xhci_enable_usb3_lpm_timeout,
        .disable_usb3_lpm_timeout =     xhci_disable_usb3_lpm_timeout,
+       .find_raw_port_number = xhci_find_raw_port_number,
 };
 
 /*-------------------------------------------------------------------------*/
index 88287546530179a867da6f3a9d88f615d87f9a81..1969c001b3f9a8bacbe926dfc4777c95206101a1 100644 (file)
@@ -1599,14 +1599,20 @@ static void handle_port_status(struct xhci_hcd *xhci,
        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
        if ((port_id <= 0) || (port_id > max_ports)) {
                xhci_warn(xhci, "Invalid port id %d\n", port_id);
-               bogus_port_status = true;
-               goto cleanup;
+               inc_deq(xhci, xhci->event_ring);
+               return;
        }
 
        /* Figure out which usb_hcd this port is attached to:
         * is it a USB 3.0 port or a USB 2.0/1.1 port?
         */
        major_revision = xhci->port_array[port_id - 1];
+
+       /* Find the right roothub. */
+       hcd = xhci_to_hcd(xhci);
+       if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
+               hcd = xhci->shared_hcd;
+
        if (major_revision == 0) {
                xhci_warn(xhci, "Event for port %u not in "
                                "Extended Capabilities, ignoring.\n",
@@ -1629,10 +1635,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
         * into the index into the ports on the correct split roothub, and the
         * correct bus_state structure.
         */
-       /* Find the right roothub. */
-       hcd = xhci_to_hcd(xhci);
-       if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
-               hcd = xhci->shared_hcd;
        bus_state = &xhci->bus_state[hcd_index(hcd)];
        if (hcd->speed == HCD_USB3)
                port_array = xhci->usb3_ports;
@@ -2027,8 +2029,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                if (event_trb != ep_ring->dequeue &&
                                event_trb != td->last_trb)
                        td->urb->actual_length =
-                               td->urb->transfer_buffer_length
-                               TRB_LEN(le32_to_cpu(event->transfer_len));
+                               td->urb->transfer_buffer_length -
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
                else
                        td->urb->actual_length = 0;
 
@@ -2060,7 +2062,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                /* Maybe the event was for the data stage? */
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
-                               TRB_LEN(le32_to_cpu(event->transfer_len));
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
                        xhci_dbg(xhci, "Waiting for status "
                                        "stage event\n");
                        return 0;
@@ -2096,7 +2098,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        /* handle completion code */
        switch (trb_comp_code) {
        case COMP_SUCCESS:
-               if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
+               if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
                        frame->status = 0;
                        break;
                }
@@ -2141,7 +2143,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
                }
                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
-                       TRB_LEN(le32_to_cpu(event->transfer_len));
+                       EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 
                if (trb_comp_code != COMP_STOP_INVAL) {
                        frame->actual_length = len;
@@ -2199,7 +2201,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
        case COMP_SUCCESS:
                /* Double check that the HW transferred everything. */
                if (event_trb != td->last_trb ||
-                               TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+                   EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
                        xhci_warn(xhci, "WARN Successful completion "
                                        "on short TX\n");
                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
@@ -2227,18 +2229,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                                "%d bytes untransferred\n",
                                td->urb->ep->desc.bEndpointAddress,
                                td->urb->transfer_buffer_length,
-                               TRB_LEN(le32_to_cpu(event->transfer_len)));
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
        /* Fast path - was this the last TRB in the TD for this URB? */
        if (event_trb == td->last_trb) {
-               if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+               if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
-                               TRB_LEN(le32_to_cpu(event->transfer_len));
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
                        if (td->urb->transfer_buffer_length <
                                        td->urb->actual_length) {
                                xhci_warn(xhci, "HC gave bad length "
                                                "of %d bytes left\n",
-                                         TRB_LEN(le32_to_cpu(event->transfer_len)));
+                                         EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
                                td->urb->actual_length = 0;
                                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                                        *status = -EREMOTEIO;
@@ -2280,7 +2282,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                if (trb_comp_code != COMP_STOP_INVAL)
                        td->urb->actual_length +=
                                TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
-                               TRB_LEN(le32_to_cpu(event->transfer_len));
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
        }
 
        return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -2368,7 +2370,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         * transfer type
         */
        case COMP_SUCCESS:
-               if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+               if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
                        break;
                if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
                        trb_comp_code = COMP_SHORT_TX;
@@ -2461,14 +2463,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 * TD list.
                 */
                if (list_empty(&ep_ring->td_list)) {
-                       xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
-                                       "with no TDs queued?\n",
-                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
-                                 ep_index);
-                       xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-                                (le32_to_cpu(event->flags) &
-                                 TRB_TYPE_BITMASK)>>10);
-                       xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+                       /*
+                        * A stopped endpoint may generate an extra completion
+                        * event if the device was suspended.  Don't print
+                        * warnings.
+                        */
+                       if (!(trb_comp_code == COMP_STOP ||
+                                               trb_comp_code == COMP_STOP_INVAL)) {
+                               xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+                                               TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+                                               ep_index);
+                               xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+                                               (le32_to_cpu(event->flags) &
+                                                TRB_TYPE_BITMASK)>>10);
+                               xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+                       }
                        if (ep->skip) {
                                ep->skip = false;
                                xhci_dbg(xhci, "td_list is empty while skip "
index 849470b188313ece555d4a9c20aa9e70bb92ff2a..53b8f89a0b1c7e6199c95672c85d3d27fb3e0b90 100644 (file)
@@ -3779,6 +3779,28 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        return 0;
 }
 
+/*
+ * Translate the port index into the real index into the HW port status
+ * registers. Calculate the offset between the port's PORTSC register
+ * and the port status base, then divide by the number of per-port
+ * registers to get the real index. Raw port numbers start at 1.
+ */
+int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       __le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
+       __le32 __iomem *addr;
+       int raw_port;
+
+       if (hcd->speed != HCD_USB3)
+               addr = xhci->usb2_ports[port1 - 1];
+       else
+               addr = xhci->usb3_ports[port1 - 1];
+
+       raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
+       return raw_port;
+}
+
 #ifdef CONFIG_USB_SUSPEND
 
 /* BESL to HIRD Encoding array for USB2 LPM */
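As a worked example of the xhci_find_raw_port_number() arithmetic above (assuming NUM_PORT_REGS is 4, i.e. PORTSC/PORTPMSC/PORTLI plus one reserved register per port): if a port's PORTSC pointer lies 8 32-bit registers past port_status_base, the division gives 8 / 4 = 2, and the 1-based raw port number is 2 + 1 = 3.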
index 2c510e4a7d4c61b92df567c83d01ffba96aa849c..63582719e0fb26cafb4d19f5248c2b5f9aa8a43a 100644 (file)
@@ -972,6 +972,10 @@ struct xhci_transfer_event {
        __le32  flags;
 };
 
+/* Transfer event TRB length bit mask */
+/* bits 0:23 */
+#define        EVENT_TRB_LEN(p)                ((p) & 0xffffff)
+
 /** Transfer Event bit fields **/
 #define        TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
 
@@ -1829,6 +1833,7 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
                char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
 
 #ifdef CONFIG_PM
 int xhci_bus_suspend(struct usb_hcd *hcd);
index 65217a5900689028e68a61e28c8011fbc9cbbaea..90549382eba5f139c6d54b7617319132189e6596 100644 (file)
@@ -38,6 +38,7 @@ config USB_ISP1301
        tristate "NXP ISP1301 USB transceiver support"
        depends on USB || USB_GADGET
        depends on I2C
+       select USB_OTG_UTILS
        help
          Say Y here to add support for the NXP ISP1301 USB transceiver driver.
          This chip is typically used as USB transceiver for USB host, gadget
index d4809d5514738a15496c216eea033ef788baaf62..9886180e45f1b5d73ddc1903f2e13eecb6297013 100644 (file)
@@ -640,6 +640,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
        { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
        { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
+       { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
index 9d359e189a645f7dcd13222433b9377787f81a34..e79861eeed4cca1471e4531ff727f0f8425e7892 100644 (file)
 #define CONTEC_VID             0x06CE  /* Vendor ID */
 #define CONTEC_COM1USBH_PID    0x8311  /* COM-1(USB)H */
 
+/*
+ * Mitsubishi Electric Corp. (http://www.meau.com)
+ * Submitted by Konstantin Holoborodko
+ */
+#define MITSUBISHI_VID         0x06D3
+#define MITSUBISHI_FXUSB_PID   0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */
+
 /*
  * Definitions for B&B Electronics products.
  */
index 2e70efa08b77121f5d248413a95b14ea79095dda..5d9b178484fdf805bfd9a6e264732d5082216088 100644 (file)
@@ -903,6 +903,7 @@ static int usb_serial_probe(struct usb_interface *interface,
                port->port.ops = &serial_port_ops;
                port->serial = serial;
                spin_lock_init(&port->lock);
+               init_waitqueue_head(&port->delta_msr_wait);
                /* Keep this for private driver use for the moment but
                   should probably go away */
                INIT_WORK(&port->work, usb_serial_port_work);
index 43fb11ee2e8ded2cc2c70b954e3b68bdc06e1760..2968b4934659aab01dd626334a9ab7fc0e607383 100644 (file)
@@ -60,6 +60,15 @@ enum {
        VHOST_SCSI_VQ_IO = 2,
 };
 
+/*
+ * VIRTIO_RING_F_EVENT_IDX seems broken. It is not clear whether the
+ * bug is in the kernel, but disabling the feature helps.
+ * TODO: debug and remove the workaround.
+ */
+enum {
+       VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
+};
+
 #define VHOST_SCSI_MAX_TARGET  256
 #define VHOST_SCSI_MAX_VQ      128
 
@@ -946,7 +955,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 
 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 {
-       if (features & ~VHOST_FEATURES)
+       if (features & ~VHOST_SCSI_FEATURES)
                return -EOPNOTSUPP;
 
        mutex_lock(&vs->dev.mutex);
@@ -992,7 +1001,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
                        return -EFAULT;
                return 0;
        case VHOST_GET_FEATURES:
-               features = VHOST_FEATURES;
+               features = VHOST_SCSI_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
index 7c254084b6a044da4658071a864e9dabbfdd16dc..ccd44b0c77a0708a584525a0d075bd5d53553922 100644 (file)
@@ -1645,6 +1645,11 @@ static int do_register_framebuffer(struct fb_info *fb_info)
        if (!fb_info->modelist.prev || !fb_info->modelist.next)
                INIT_LIST_HEAD(&fb_info->modelist);
 
+       if (fb_info->skip_vt_switch)
+               pm_vt_switch_required(fb_info->dev, false);
+       else
+               pm_vt_switch_required(fb_info->dev, true);
+
        fb_var_to_videomode(&mode, &fb_info->var);
        fb_add_videomode(&mode, &fb_info->modelist);
        registered_fb[i] = fb_info;
@@ -1679,6 +1684,8 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
        if (ret)
                return -EINVAL;
 
+       pm_vt_switch_unregister(fb_info->dev);
+
        unlink_framebuffer(fb_info);
        if (fb_info->pixmap.addr &&
            (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
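A usage sketch from a driver's point of view (the surrounding driver code is hypothetical; only the skip_vt_switch field comes from the hunk above): a driver that handles its own console switching can opt out of the forced VT switch before registering its framebuffer.

	info->skip_vt_switch = true;	/* no VT switch needed around suspend/resume */
	ret = register_framebuffer(info);
	if (ret < 0)
		return ret;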
index ab23c9b79143d3c236905cd28a35d35dcaf5ff80..40178338b61994a024482db29107bd5efaa9454f 100644 (file)
@@ -1,9 +1,24 @@
 /*
  * Copyright (C) 2012 Avionic Design GmbH
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
 
 #include <linux/bitops.h>
index 755556ca5b2d988ec68b8cc3c1f9bbe7aadf6a34..45169cbaba6e288e714e2ce414dff1061a4569af 100644 (file)
@@ -169,6 +169,7 @@ struct mxsfb_info {
        unsigned dotclk_delay;
        const struct mxsfb_devdata *devdata;
        int mapped;
+       u32 sync;
 };
 
 #define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
@@ -456,9 +457,9 @@ static int mxsfb_set_par(struct fb_info *fb_info)
                vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
        if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
                vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
-       if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT)
+       if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT)
                vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
-       if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT)
+       if (host->sync & MXSFB_SYNC_DOTCLK_FAILING_ACT)
                vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING;
 
        writel(vdctrl0, host->base + LCDC_VDCTRL0);
@@ -861,6 +862,8 @@ static int mxsfb_probe(struct platform_device *pdev)
 
        INIT_LIST_HEAD(&fb_info->modelist);
 
+       host->sync = pdata->sync;
+
        ret = mxsfb_init_fbinfo(host);
        if (ret != 0)
                goto error_init_fb;
index e31f5b33b501e155fed50e9a5463614e9cdc1690..d40612c31a989d9437e482496ff52c6f451404d8 100644 (file)
@@ -32,6 +32,8 @@
 
 #include <linux/omap-dma.h>
 
+#include <mach/hardware.h>
+
 #include "omapfb.h"
 #include "lcdc.h"
 
index 72699f88c00215bc41f5f775f17caf8399dc1e23..d7f69c09ecf12d2cb50586f89f5536d5ca27adac 100644 (file)
 #include <linux/sched.h>
 #include <linux/backlight.h>
 #include <linux/fb.h>
+#include <linux/gpio.h>
 
 #include <video/omapdss.h>
+#include <video/omap-panel-data.h>
 
 #define MIPID_CMD_READ_DISP_ID         0x04
 #define MIPID_CMD_READ_RED             0x06
@@ -336,8 +338,6 @@ static int acx565akm_bl_update_status(struct backlight_device *dev)
        r = 0;
        if (md->has_bc)
                acx565akm_set_brightness(md, level);
-       else if (md->dssdev->set_backlight)
-               r = md->dssdev->set_backlight(md->dssdev, level);
        else
                r = -ENODEV;
 
@@ -352,7 +352,7 @@ static int acx565akm_bl_get_intensity(struct backlight_device *dev)
 
        dev_dbg(&dev->dev, "%s\n", __func__);
 
-       if (!md->has_bc && md->dssdev->set_backlight == NULL)
+       if (!md->has_bc)
                return -ENODEV;
 
        if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
@@ -496,21 +496,38 @@ static struct omap_video_timings acx_panel_timings = {
        .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
 };
 
+static struct panel_acx565akm_data *get_panel_data(struct omap_dss_device *dssdev)
+{
+       return (struct panel_acx565akm_data *) dssdev->data;
+}
+
 static int acx_panel_probe(struct omap_dss_device *dssdev)
 {
        int r;
        struct acx565akm_device *md = &acx_dev;
+       struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
        struct backlight_device *bldev;
        int max_brightness, brightness;
        struct backlight_properties props;
 
        dev_dbg(&dssdev->dev, "%s\n", __func__);
 
+       if (!panel_data)
+               return -EINVAL;
+
        /* FIXME AC bias ? */
        dssdev->panel.timings = acx_panel_timings;
 
-       if (dssdev->platform_enable)
-               dssdev->platform_enable(dssdev);
+       if (gpio_is_valid(panel_data->reset_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, panel_data->reset_gpio,
+                               GPIOF_OUT_INIT_LOW, "lcd reset");
+               if (r)
+                       return r;
+       }
+
+       if (gpio_is_valid(panel_data->reset_gpio))
+               gpio_set_value(panel_data->reset_gpio, 1);
+
        /*
         * After reset we have to wait 5 msec before the first
         * command can be sent.
@@ -522,8 +539,9 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
        r = panel_detect(md);
        if (r) {
                dev_err(&dssdev->dev, "%s panel detect error\n", __func__);
-               if (!md->enabled && dssdev->platform_disable)
-                       dssdev->platform_disable(dssdev);
+               if (!md->enabled && gpio_is_valid(panel_data->reset_gpio))
+                       gpio_set_value(panel_data->reset_gpio, 0);
+
                return r;
        }
 
@@ -532,8 +550,8 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
        mutex_unlock(&acx_dev.mutex);
 
        if (!md->enabled) {
-               if (dssdev->platform_disable)
-                       dssdev->platform_disable(dssdev);
+               if (gpio_is_valid(panel_data->reset_gpio))
+                       gpio_set_value(panel_data->reset_gpio, 0);
        }
 
        /*------- Backlight control --------*/
@@ -557,15 +575,10 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
                md->cabc_mode = get_hw_cabc_mode(md);
        }
 
-       if (md->has_bc)
-               max_brightness = 255;
-       else
-               max_brightness = dssdev->max_backlight_level;
+       max_brightness = 255;
 
        if (md->has_bc)
                brightness = acx565akm_get_actual_brightness(md);
-       else if (dssdev->get_backlight)
-               brightness = dssdev->get_backlight(dssdev);
        else
                brightness = 0;
 
@@ -591,6 +604,7 @@ static void acx_panel_remove(struct omap_dss_device *dssdev)
 static int acx_panel_power_on(struct omap_dss_device *dssdev)
 {
        struct acx565akm_device *md = &acx_dev;
+       struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
        int r;
 
        dev_dbg(&dssdev->dev, "%s\n", __func__);
@@ -612,11 +626,8 @@ static int acx_panel_power_on(struct omap_dss_device *dssdev)
        /*FIXME tweak me */
        msleep(50);
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto fail;
-       }
+       if (gpio_is_valid(panel_data->reset_gpio))
+               gpio_set_value(panel_data->reset_gpio, 1);
 
        if (md->enabled) {
                dev_dbg(&md->spi->dev, "panel already enabled\n");
@@ -645,8 +656,7 @@ static int acx_panel_power_on(struct omap_dss_device *dssdev)
        mutex_unlock(&md->mutex);
 
        return acx565akm_bl_update_status(md->bl_dev);
-fail:
-       omapdss_sdi_display_disable(dssdev);
+
 fail_unlock:
        mutex_unlock(&md->mutex);
        return r;
@@ -655,6 +665,7 @@ fail_unlock:
 static void acx_panel_power_off(struct omap_dss_device *dssdev)
 {
        struct acx565akm_device *md = &acx_dev;
+       struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
 
        dev_dbg(&dssdev->dev, "%s\n", __func__);
 
@@ -678,8 +689,8 @@ static void acx_panel_power_off(struct omap_dss_device *dssdev)
         */
        msleep(50);
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
+       if (gpio_is_valid(panel_data->reset_gpio))
+               gpio_set_value(panel_data->reset_gpio, 0);
 
        /* FIXME need to tweak this delay */
        msleep(100);
index c904f42d81c18c6fd6d5c7ba2e66a51e73fedc8c..97363f73368323353b245b5a46c0a065b8788fb7 100644 (file)
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/gpio.h>
 #include <video/omapdss.h>
 
-#include <video/omap-panel-generic-dpi.h>
+#include <video/omap-panel-data.h>
 
 struct panel_config {
        struct omap_video_timings timings;
@@ -533,7 +534,7 @@ static inline struct panel_generic_dpi_data
 
 static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
 {
-       int r;
+       int r, i;
        struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
        struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
        struct panel_config *panel_config = drv_data->panel_config;
@@ -552,15 +553,13 @@ static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
        if (panel_config->power_on_delay)
                msleep(panel_config->power_on_delay);
 
-       if (panel_data->platform_enable) {
-               r = panel_data->platform_enable(dssdev);
-               if (r)
-                       goto err1;
+       for (i = 0; i < panel_data->num_gpios; ++i) {
+               gpio_set_value_cansleep(panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ? 0 : 1);
        }
 
        return 0;
-err1:
-       omapdss_dpi_display_disable(dssdev);
+
 err0:
        return r;
 }
@@ -570,12 +569,15 @@ static void generic_dpi_panel_power_off(struct omap_dss_device *dssdev)
        struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
        struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
        struct panel_config *panel_config = drv_data->panel_config;
+       int i;
 
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return;
 
-       if (panel_data->platform_disable)
-               panel_data->platform_disable(dssdev);
+       for (i = panel_data->num_gpios - 1; i >= 0; --i) {
+               gpio_set_value_cansleep(panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ? 1 : 0);
+       }
 
        /* wait couple of vsyncs after disabling the LCD */
        if (panel_config->power_off_delay)
@@ -589,7 +591,7 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
        struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
        struct panel_config *panel_config = NULL;
        struct panel_drv_data *drv_data = NULL;
-       int i;
+       int i, r;
 
        dev_dbg(&dssdev->dev, "probe\n");
 
@@ -606,9 +608,18 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
        if (!panel_config)
                return -EINVAL;
 
+       for (i = 0; i < panel_data->num_gpios; ++i) {
+               r = devm_gpio_request_one(&dssdev->dev, panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ?
+                               GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+                               "panel gpio");
+               if (r)
+                       return r;
+       }
+
        dssdev->panel.timings = panel_config->timings;
 
-       drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+       drv_data = devm_kzalloc(&dssdev->dev, sizeof(*drv_data), GFP_KERNEL);
        if (!drv_data)
                return -ENOMEM;
 
@@ -624,12 +635,8 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
 
 static void __exit generic_dpi_panel_remove(struct omap_dss_device *dssdev)
 {
-       struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
-
        dev_dbg(&dssdev->dev, "remove\n");
 
-       kfree(drv_data);
-
        dev_set_drvdata(&dssdev->dev, NULL);
 }
 
index 6e5abe8fd2ddbe9f94dcd384c9235c56de1dd3d6..4ea6548c0ae93c2c42d8ad1b7d6965eae435939a 100644 (file)
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/mutex.h>
+#include <linux/gpio.h>
 
 #include <video/omapdss.h>
+#include <video/omap-panel-data.h>
 
 struct lb035q02_data {
        struct mutex lock;
@@ -48,9 +50,16 @@ static struct omap_video_timings lb035q02_timings = {
        .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
 };
 
+static inline struct panel_generic_dpi_data
+*get_panel_data(const struct omap_dss_device *dssdev)
+{
+       return (struct panel_generic_dpi_data *) dssdev->data;
+}
+
 static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
 {
-       int r;
+       struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+       int r, i;
 
        if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
                return 0;
@@ -62,54 +71,65 @@ static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
        if (r)
                goto err0;
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto err1;
+       for (i = 0; i < panel_data->num_gpios; ++i) {
+               gpio_set_value_cansleep(panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ? 0 : 1);
        }
 
        return 0;
-err1:
-       omapdss_dpi_display_disable(dssdev);
+
 err0:
        return r;
 }
 
 static void lb035q02_panel_power_off(struct omap_dss_device *dssdev)
 {
+       struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+       int i;
+
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return;
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
+       for (i = panel_data->num_gpios - 1; i >= 0; --i) {
+               gpio_set_value_cansleep(panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ? 1 : 0);
+       }
 
        omapdss_dpi_display_disable(dssdev);
 }
 
 static int lb035q02_panel_probe(struct omap_dss_device *dssdev)
 {
+       struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
        struct lb035q02_data *ld;
-       int r;
+       int r, i;
+
+       if (!panel_data)
+               return -EINVAL;
 
        dssdev->panel.timings = lb035q02_timings;
 
-       ld = kzalloc(sizeof(*ld), GFP_KERNEL);
-       if (!ld) {
-               r = -ENOMEM;
-               goto err;
+       ld = devm_kzalloc(&dssdev->dev, sizeof(*ld), GFP_KERNEL);
+       if (!ld)
+               return -ENOMEM;
+
+       for (i = 0; i < panel_data->num_gpios; ++i) {
+               r = devm_gpio_request_one(&dssdev->dev, panel_data->gpios[i],
+                               panel_data->gpio_invert[i] ?
+                               GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+                               "panel gpio");
+               if (r)
+                       return r;
        }
+
        mutex_init(&ld->lock);
        dev_set_drvdata(&dssdev->dev, ld);
+
        return 0;
-err:
-       return r;
 }
 
 static void lb035q02_panel_remove(struct omap_dss_device *dssdev)
 {
-       struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
-
-       kfree(ld);
 }
 
 static int lb035q02_panel_enable(struct omap_dss_device *dssdev)
index dd129475080297de33f239dfc903bb990fd373c5..f94ead6a3183c0d893b860fa4913aaf0a1180787 100644 (file)
@@ -5,11 +5,10 @@
 #include <linux/slab.h>
 #include <linux/gpio.h>
 #include <linux/spi/spi.h>
-#include <linux/backlight.h>
 #include <linux/fb.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-n8x0.h>
+#include <video/omap-panel-data.h>
 
 #define BLIZZARD_REV_CODE                      0x00
 #define BLIZZARD_CONFIG                        0x02
@@ -69,7 +68,6 @@ static struct panel_drv_data {
 
        struct omap_dss_device *dssdev;
        struct spi_device *spidev;
-       struct backlight_device *bldev;
 
        int blizzard_ver;
 } s_drv_data;
@@ -297,12 +295,6 @@ static int n8x0_panel_power_on(struct omap_dss_device *dssdev)
 
        gpio_direction_output(bdata->ctrl_pwrdown, 1);
 
-       if (bdata->platform_enable) {
-               r = bdata->platform_enable(dssdev);
-               if (r)
-                       goto err_plat_en;
-       }
-
        omapdss_rfbi_set_size(dssdev, dssdev->panel.timings.x_res,
                dssdev->panel.timings.y_res);
        omapdss_rfbi_set_pixel_size(dssdev, dssdev->ctrl.pixel_size);
@@ -375,9 +367,6 @@ err_inv_panel:
 err_inv_chip:
        omapdss_rfbi_display_disable(dssdev);
 err_rfbi_en:
-       if (bdata->platform_disable)
-               bdata->platform_disable(dssdev);
-err_plat_en:
        gpio_direction_output(bdata->ctrl_pwrdown, 0);
        return r;
 }
@@ -394,9 +383,6 @@ static void n8x0_panel_power_off(struct omap_dss_device *dssdev)
        send_display_off(spi);
        send_sleep_in(spi);
 
-       if (bdata->platform_disable)
-               bdata->platform_disable(dssdev);
-
        /*
         * HACK: we should turn off the panel here, but there is some problem
         * with the initialization sequence, and we fail to init the panel if we
@@ -424,54 +410,10 @@ static const struct rfbi_timings n8x0_panel_timings = {
        .cs_pulse_width = 0,
 };
 
-static int n8x0_bl_update_status(struct backlight_device *dev)
-{
-       struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
-       struct panel_n8x0_data *bdata = get_board_data(dssdev);
-       struct panel_drv_data *ddata = get_drv_data(dssdev);
-       int r;
-       int level;
-
-       mutex_lock(&ddata->lock);
-
-       if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
-                       dev->props.power == FB_BLANK_UNBLANK)
-               level = dev->props.brightness;
-       else
-               level = 0;
-
-       dev_dbg(&dssdev->dev, "update brightness to %d\n", level);
-
-       if (!bdata->set_backlight)
-               r = -EINVAL;
-       else
-               r = bdata->set_backlight(dssdev, level);
-
-       mutex_unlock(&ddata->lock);
-
-       return r;
-}
-
-static int n8x0_bl_get_intensity(struct backlight_device *dev)
-{
-       if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
-                       dev->props.power == FB_BLANK_UNBLANK)
-               return dev->props.brightness;
-
-       return 0;
-}
-
-static const struct backlight_ops n8x0_bl_ops = {
-       .get_brightness = n8x0_bl_get_intensity,
-       .update_status  = n8x0_bl_update_status,
-};
-
 static int n8x0_panel_probe(struct omap_dss_device *dssdev)
 {
        struct panel_n8x0_data *bdata = get_board_data(dssdev);
        struct panel_drv_data *ddata;
-       struct backlight_device *bldev;
-       struct backlight_properties props;
        int r;
 
        dev_dbg(&dssdev->dev, "probe\n");
@@ -491,40 +433,27 @@ static int n8x0_panel_probe(struct omap_dss_device *dssdev)
        dssdev->ctrl.rfbi_timings = n8x0_panel_timings;
        dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
 
-       memset(&props, 0, sizeof(props));
-       props.max_brightness = 127;
-       props.type = BACKLIGHT_PLATFORM;
-       bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev,
-                       dssdev, &n8x0_bl_ops, &props);
-       if (IS_ERR(bldev)) {
-               r = PTR_ERR(bldev);
-               dev_err(&dssdev->dev, "register backlight failed\n");
-               return r;
+       if (gpio_is_valid(bdata->panel_reset)) {
+               r = devm_gpio_request_one(&dssdev->dev, bdata->panel_reset,
+                               GPIOF_OUT_INIT_LOW, "PANEL RESET");
+               if (r)
+                       return r;
        }
 
-       ddata->bldev = bldev;
-
-       bldev->props.fb_blank = FB_BLANK_UNBLANK;
-       bldev->props.power = FB_BLANK_UNBLANK;
-       bldev->props.brightness = 127;
-
-       n8x0_bl_update_status(bldev);
+       if (gpio_is_valid(bdata->ctrl_pwrdown)) {
+               r = devm_gpio_request_one(&dssdev->dev, bdata->ctrl_pwrdown,
+                               GPIOF_OUT_INIT_LOW, "PANEL PWRDOWN");
+               if (r)
+                       return r;
+       }
 
        return 0;
 }
 
 static void n8x0_panel_remove(struct omap_dss_device *dssdev)
 {
-       struct panel_drv_data *ddata = get_drv_data(dssdev);
-       struct backlight_device *bldev;
-
        dev_dbg(&dssdev->dev, "remove\n");
 
-       bldev = ddata->bldev;
-       bldev->props.power = FB_BLANK_POWERDOWN;
-       n8x0_bl_update_status(bldev);
-       backlight_device_unregister(bldev);
-
        dev_set_drvdata(&dssdev->dev, NULL);
 }
 
index c4e9c2b1b465c135e6ddb2c33d01161e41f737a8..20c3cd91ff9b2cae8a432b293a614711e310fcf6 100644 (file)
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
-#include <linux/backlight.h>
 #include <linux/fb.h>
+#include <linux/gpio.h>
 
 #include <video/omapdss.h>
+#include <video/omap-panel-data.h>
 
 #define LCD_XRES               800
 #define LCD_YRES               480
  */
 #define LCD_PIXEL_CLOCK                23800
 
-struct nec_8048_data {
-       struct backlight_device *bl;
-};
-
 static const struct {
        unsigned char addr;
        unsigned char dat;
@@ -84,93 +81,47 @@ static struct omap_video_timings nec_8048_panel_timings = {
        .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
 };
 
-static int nec_8048_bl_update_status(struct backlight_device *bl)
-{
-       struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
-       int level;
-
-       if (!dssdev->set_backlight)
-               return -EINVAL;
-
-       if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
-                       bl->props.power == FB_BLANK_UNBLANK)
-               level = bl->props.brightness;
-       else
-               level = 0;
-
-       return dssdev->set_backlight(dssdev, level);
-}
-
-static int nec_8048_bl_get_brightness(struct backlight_device *bl)
+static inline struct panel_nec_nl8048_data
+*get_panel_data(const struct omap_dss_device *dssdev)
 {
-       if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
-                       bl->props.power == FB_BLANK_UNBLANK)
-               return bl->props.brightness;
-
-       return 0;
+       return (struct panel_nec_nl8048_data *) dssdev->data;
 }
 
-static const struct backlight_ops nec_8048_bl_ops = {
-       .get_brightness = nec_8048_bl_get_brightness,
-       .update_status  = nec_8048_bl_update_status,
-};
-
 static int nec_8048_panel_probe(struct omap_dss_device *dssdev)
 {
-       struct backlight_device *bl;
-       struct nec_8048_data *necd;
-       struct backlight_properties props;
+       struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
        int r;
 
-       dssdev->panel.timings = nec_8048_panel_timings;
-
-       necd = kzalloc(sizeof(*necd), GFP_KERNEL);
-       if (!necd)
-               return -ENOMEM;
-
-       dev_set_drvdata(&dssdev->dev, necd);
+       if (!pd)
+               return -EINVAL;
 
-       memset(&props, 0, sizeof(struct backlight_properties));
-       props.max_brightness = 255;
+       dssdev->panel.timings = nec_8048_panel_timings;
 
-       bl = backlight_device_register("nec-8048", &dssdev->dev, dssdev,
-                       &nec_8048_bl_ops, &props);
-       if (IS_ERR(bl)) {
-               r = PTR_ERR(bl);
-               kfree(necd);
-               return r;
+       if (gpio_is_valid(pd->qvga_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->qvga_gpio,
+                               GPIOF_OUT_INIT_HIGH, "lcd QVGA");
+               if (r)
+                       return r;
        }
-       necd->bl = bl;
-
-       bl->props.fb_blank = FB_BLANK_UNBLANK;
-       bl->props.power = FB_BLANK_UNBLANK;
-       bl->props.max_brightness = dssdev->max_backlight_level;
-       bl->props.brightness = dssdev->max_backlight_level;
 
-       r = nec_8048_bl_update_status(bl);
-       if (r < 0)
-               dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+       if (gpio_is_valid(pd->res_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->res_gpio,
+                               GPIOF_OUT_INIT_LOW, "lcd RES");
+               if (r)
+                       return r;
+       }
 
        return 0;
 }
 
 static void nec_8048_panel_remove(struct omap_dss_device *dssdev)
 {
-       struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
-       struct backlight_device *bl = necd->bl;
-
-       bl->props.power = FB_BLANK_POWERDOWN;
-       nec_8048_bl_update_status(bl);
-       backlight_device_unregister(bl);
-
-       kfree(necd);
 }
 
 static int nec_8048_panel_power_on(struct omap_dss_device *dssdev)
 {
+       struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
        int r;
-       struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
-       struct backlight_device *bl = necd->bl;
 
        if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
                return 0;
@@ -182,36 +133,24 @@ static int nec_8048_panel_power_on(struct omap_dss_device *dssdev)
        if (r)
                goto err0;
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto err1;
-       }
-
-       r = nec_8048_bl_update_status(bl);
-       if (r < 0)
-               dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+       if (gpio_is_valid(pd->res_gpio))
+               gpio_set_value_cansleep(pd->res_gpio, 1);
 
        return 0;
-err1:
-       omapdss_dpi_display_disable(dssdev);
+
 err0:
        return r;
 }
 
 static void nec_8048_panel_power_off(struct omap_dss_device *dssdev)
 {
-       struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
-       struct backlight_device *bl = necd->bl;
+       struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
 
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return;
 
-       bl->props.brightness = 0;
-       nec_8048_bl_update_status(bl);
-
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
+       if (gpio_is_valid(pd->res_gpio))
+               gpio_set_value_cansleep(pd->res_gpio, 0);
 
        omapdss_dpi_display_disable(dssdev);
 }
@@ -303,16 +242,22 @@ static int nec_8048_spi_remove(struct spi_device *spi)
        return 0;
 }
 
-static int nec_8048_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+
+static int nec_8048_spi_suspend(struct device *dev)
 {
+       struct spi_device *spi = to_spi_device(dev);
+
        nec_8048_spi_send(spi, 2, 0x01);
        mdelay(40);
 
        return 0;
 }
 
-static int nec_8048_spi_resume(struct spi_device *spi)
+static int nec_8048_spi_resume(struct device *dev)
 {
+       struct spi_device *spi = to_spi_device(dev);
+
        /* reinitialize the panel */
        spi_setup(spi);
        nec_8048_spi_send(spi, 2, 0x00);
@@ -321,14 +266,20 @@ static int nec_8048_spi_resume(struct spi_device *spi)
        return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(nec_8048_spi_pm_ops, nec_8048_spi_suspend,
+               nec_8048_spi_resume);
+#define NEC_8048_SPI_PM_OPS (&nec_8048_spi_pm_ops)
+#else
+#define NEC_8048_SPI_PM_OPS NULL
+#endif
+
 static struct spi_driver nec_8048_spi_driver = {
        .probe          = nec_8048_spi_probe,
        .remove         = nec_8048_spi_remove,
-       .suspend        = nec_8048_spi_suspend,
-       .resume         = nec_8048_spi_resume,
        .driver         = {
                .name   = "nec_8048_spi",
                .owner  = THIS_MODULE,
+               .pm     = NEC_8048_SPI_PM_OPS,
        },
 };
 
index 1b94018aac3e0c840f7c7f59e419c5e7eee993bb..62f2db04fbc85530ba88a1027dd9e124b86071d9 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/gpio.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-picodlp.h>
+#include <video/omap-panel-data.h>
 
 #include "panel-picodlp.h"
 
@@ -354,12 +354,6 @@ static int picodlp_panel_power_on(struct omap_dss_device *dssdev)
        struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
        struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       return r;
-       }
-
        gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
        msleep(1);
        gpio_set_value(picodlp_pdata->pwrgood_gpio, 1);
@@ -398,9 +392,6 @@ static int picodlp_panel_power_on(struct omap_dss_device *dssdev)
 err:
        omapdss_dpi_display_disable(dssdev);
 err1:
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
-
        return r;
 }
 
@@ -412,9 +403,6 @@ static void picodlp_panel_power_off(struct omap_dss_device *dssdev)
 
        gpio_set_value(picodlp_pdata->emu_done_gpio, 0);
        gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
-
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
 }
 
 static int picodlp_panel_probe(struct omap_dss_device *dssdev)
@@ -423,11 +411,14 @@ static int picodlp_panel_probe(struct omap_dss_device *dssdev)
        struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
        struct i2c_adapter *adapter;
        struct i2c_client *picodlp_i2c_client;
-       int r = 0, picodlp_adapter_id;
+       int r, picodlp_adapter_id;
 
        dssdev->panel.timings = pico_ls_timings;
 
-       picod =  kzalloc(sizeof(struct picodlp_data), GFP_KERNEL);
+       if (!picodlp_pdata)
+               return -EINVAL;
+
+       picod = devm_kzalloc(&dssdev->dev, sizeof(*picod), GFP_KERNEL);
        if (!picod)
                return -ENOMEM;
 
@@ -438,25 +429,37 @@ static int picodlp_panel_probe(struct omap_dss_device *dssdev)
        adapter = i2c_get_adapter(picodlp_adapter_id);
        if (!adapter) {
                dev_err(&dssdev->dev, "can't get i2c adapter\n");
-               r = -ENODEV;
-               goto err;
+               return -ENODEV;
        }
 
        picodlp_i2c_client = i2c_new_device(adapter, &picodlp_i2c_board_info);
        if (!picodlp_i2c_client) {
                dev_err(&dssdev->dev, "can't add i2c device::"
                                         " picodlp_i2c_client is NULL\n");
-               r = -ENODEV;
-               goto err;
+               return -ENODEV;
        }
 
        picod->picodlp_i2c_client = picodlp_i2c_client;
 
        dev_set_drvdata(&dssdev->dev, picod);
-       return r;
-err:
-       kfree(picod);
-       return r;
+
+       if (gpio_is_valid(picodlp_pdata->emu_done_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev,
+                               picodlp_pdata->emu_done_gpio,
+                               GPIOF_IN, "DLP EMU DONE");
+               if (r)
+                       return r;
+       }
+
+       if (gpio_is_valid(picodlp_pdata->pwrgood_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev,
+                               picodlp_pdata->pwrgood_gpio,
+                               GPIOF_OUT_INIT_LOW, "DLP PWRGOOD");
+               if (r)
+                       return r;
+       }
+
+       return 0;
 }
 
 static void picodlp_panel_remove(struct omap_dss_device *dssdev)
index cada8c621e01bd2f4724c82f61b2557dce9237f5..74cb0eb453113e46856e9e224fba0f9873ca3926 100644 (file)
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/backlight.h>
 #include <linux/fb.h>
 #include <linux/err.h>
 #include <linux/slab.h>
+#include <linux/gpio.h>
 
 #include <video/omapdss.h>
-
-struct sharp_data {
-       struct backlight_device *bl;
-};
+#include <video/omap-panel-data.h>
 
 static struct omap_video_timings sharp_ls_timings = {
        .x_res = 480,
@@ -52,91 +49,67 @@ static struct omap_video_timings sharp_ls_timings = {
        .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
 };
 
-static int sharp_ls_bl_update_status(struct backlight_device *bl)
+static inline struct panel_sharp_ls037v7dw01_data
+*get_panel_data(const struct omap_dss_device *dssdev)
 {
-       struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
-       int level;
-
-       if (!dssdev->set_backlight)
-               return -EINVAL;
-
-       if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
-                       bl->props.power == FB_BLANK_UNBLANK)
-               level = bl->props.brightness;
-       else
-               level = 0;
-
-       return dssdev->set_backlight(dssdev, level);
+       return (struct panel_sharp_ls037v7dw01_data *) dssdev->data;
 }
 
-static int sharp_ls_bl_get_brightness(struct backlight_device *bl)
-{
-       if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
-                       bl->props.power == FB_BLANK_UNBLANK)
-               return bl->props.brightness;
-
-       return 0;
-}
-
-static const struct backlight_ops sharp_ls_bl_ops = {
-       .get_brightness = sharp_ls_bl_get_brightness,
-       .update_status  = sharp_ls_bl_update_status,
-};
-
-
-
 static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
 {
-       struct backlight_properties props;
-       struct backlight_device *bl;
-       struct sharp_data *sd;
+       struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
        int r;
 
+       if (!pd)
+               return -EINVAL;
+
        dssdev->panel.timings = sharp_ls_timings;
 
-       sd = kzalloc(sizeof(*sd), GFP_KERNEL);
-       if (!sd)
-               return -ENOMEM;
+       if (gpio_is_valid(pd->mo_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->mo_gpio,
+                               GPIOF_OUT_INIT_LOW, "lcd MO");
+               if (r)
+                       return r;
+       }
 
-       dev_set_drvdata(&dssdev->dev, sd);
+       if (gpio_is_valid(pd->lr_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->lr_gpio,
+                               GPIOF_OUT_INIT_HIGH, "lcd LR");
+               if (r)
+                       return r;
+       }
 
-       memset(&props, 0, sizeof(struct backlight_properties));
-       props.max_brightness = dssdev->max_backlight_level;
-       props.type = BACKLIGHT_RAW;
+       if (gpio_is_valid(pd->ud_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->ud_gpio,
+                               GPIOF_OUT_INIT_HIGH, "lcd UD");
+               if (r)
+                       return r;
+       }
 
-       bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev,
-                       &sharp_ls_bl_ops, &props);
-       if (IS_ERR(bl)) {
-               r = PTR_ERR(bl);
-               kfree(sd);
-               return r;
+       if (gpio_is_valid(pd->resb_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->resb_gpio,
+                               GPIOF_OUT_INIT_LOW, "lcd RESB");
+               if (r)
+                       return r;
        }
-       sd->bl = bl;
 
-       bl->props.fb_blank = FB_BLANK_UNBLANK;
-       bl->props.power = FB_BLANK_UNBLANK;
-       bl->props.brightness = dssdev->max_backlight_level;
-       r = sharp_ls_bl_update_status(bl);
-       if (r < 0)
-               dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+       if (gpio_is_valid(pd->ini_gpio)) {
+               r = devm_gpio_request_one(&dssdev->dev, pd->ini_gpio,
+                               GPIOF_OUT_INIT_LOW, "lcd INI");
+               if (r)
+                       return r;
+       }
 
        return 0;
 }
 
 static void __exit sharp_ls_panel_remove(struct omap_dss_device *dssdev)
 {
-       struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
-       struct backlight_device *bl = sd->bl;
-
-       bl->props.power = FB_BLANK_POWERDOWN;
-       sharp_ls_bl_update_status(bl);
-       backlight_device_unregister(bl);
-
-       kfree(sd);
 }
 
 static int sharp_ls_power_on(struct omap_dss_device *dssdev)
 {
+       struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
        int r = 0;
 
        if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
@@ -152,26 +125,29 @@ static int sharp_ls_power_on(struct omap_dss_device *dssdev)
        /* wait couple of vsyncs until enabling the LCD */
        msleep(50);
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto err1;
-       }
+       if (gpio_is_valid(pd->resb_gpio))
+               gpio_set_value_cansleep(pd->resb_gpio, 1);
+
+       if (gpio_is_valid(pd->ini_gpio))
+               gpio_set_value_cansleep(pd->ini_gpio, 1);
 
        return 0;
-err1:
-       omapdss_dpi_display_disable(dssdev);
 err0:
        return r;
 }
 
 static void sharp_ls_power_off(struct omap_dss_device *dssdev)
 {
+       struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
+
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return;
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
+       if (gpio_is_valid(pd->ini_gpio))
+               gpio_set_value_cansleep(pd->ini_gpio, 0);
+
+       if (gpio_is_valid(pd->resb_gpio))
+               gpio_set_value_cansleep(pd->resb_gpio, 0);
 
        /* wait at least 5 vsyncs after disabling the LCD */
 
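The sharp_ls hunks above move panel control to GPIOs taken from platform data: each line (MO, LR, UD, RESB, INI) is requested only when gpio_is_valid() reports a usable number, and power on/off then simply raises or lowers RESB and INI. A standalone sketch of that optional-GPIO guard, with gpio_is_valid() re-implemented as the simple sign check and made-up GPIO numbers:

#include <stdbool.h>
#include <stdio.h>

/* The kernel treats any negative number as "no GPIO wired up". */
static bool gpio_is_valid(int gpio)
{
	return gpio >= 0;
}

static void gpio_set(int gpio, int value)
{
	printf("gpio %d -> %d\n", gpio, value);
}

struct panel_gpios { int resb_gpio, ini_gpio; };

/* Only touch the lines the board actually provides, as in sharp_ls_power_on(). */
static void panel_power_on(const struct panel_gpios *pd)
{
	if (gpio_is_valid(pd->resb_gpio))
		gpio_set(pd->resb_gpio, 1);
	if (gpio_is_valid(pd->ini_gpio))
		gpio_set(pd->ini_gpio, 1);
}

int main(void)
{
	struct panel_gpios pd = { .resb_gpio = 157, .ini_gpio = -1 };

	panel_power_on(&pd);	/* toggles RESB only; INI is not wired */
	return 0;
}
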
index a32407a5735af59b123c8865c6dcf2ec3220db7a..c4f78bda115a405702bb76e719605be3fc553a1e 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/mutex.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-nokia-dsi.h>
+#include <video/omap-panel-data.h>
 #include <video/mipi_display.h>
 
 /* DSI Virtual channel. Hardcoded for now. */
@@ -54,61 +54,6 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
 
 static int taal_panel_reset(struct omap_dss_device *dssdev);
 
-/**
- * struct panel_config - panel configuration
- * @name: panel name
- * @type: panel type
- * @timings: panel resolution
- * @sleep: various panel specific delays, passed to msleep() if non-zero
- * @reset_sequence: reset sequence timings, passed to udelay() if non-zero
- * @regulators: array of panel regulators
- * @num_regulators: number of regulators in the array
- */
-struct panel_config {
-       const char *name;
-       int type;
-
-       struct omap_video_timings timings;
-
-       struct {
-               unsigned int sleep_in;
-               unsigned int sleep_out;
-               unsigned int hw_reset;
-               unsigned int enable_te;
-       } sleep;
-
-       struct {
-               unsigned int high;
-               unsigned int low;
-       } reset_sequence;
-
-};
-
-enum {
-       PANEL_TAAL,
-};
-
-static struct panel_config panel_configs[] = {
-       {
-               .name           = "taal",
-               .type           = PANEL_TAAL,
-               .timings        = {
-                       .x_res          = 864,
-                       .y_res          = 480,
-               },
-               .sleep          = {
-                       .sleep_in       = 5,
-                       .sleep_out      = 5,
-                       .hw_reset       = 5,
-                       .enable_te      = 100, /* possible panel bug */
-               },
-               .reset_sequence = {
-                       .high           = 10,
-                       .low            = 10,
-               },
-       },
-};
-
 struct taal_data {
        struct mutex lock;
 
@@ -121,9 +66,6 @@ struct taal_data {
 
        struct omap_dss_device *dssdev;
 
-       /* panel specific HW info */
-       struct panel_config *panel_config;
-
        /* panel HW configuration from DT or platform data */
        int reset_gpio;
        int ext_te_gpio;
@@ -134,8 +76,6 @@ struct taal_data {
 
        /* runtime variables */
        bool enabled;
-       u8 rotate;
-       bool mirror;
 
        bool te_enabled;
 
@@ -221,8 +161,7 @@ static int taal_sleep_in(struct taal_data *td)
 
        hw_guard_start(td, 120);
 
-       if (td->panel_config->sleep.sleep_in)
-               msleep(td->panel_config->sleep.sleep_in);
+       msleep(5);
 
        return 0;
 }
@@ -239,8 +178,7 @@ static int taal_sleep_out(struct taal_data *td)
 
        hw_guard_start(td, 120);
 
-       if (td->panel_config->sleep.sleep_out)
-               msleep(td->panel_config->sleep.sleep_out);
+       msleep(5);
 
        return 0;
 }
@@ -262,49 +200,6 @@ static int taal_get_id(struct taal_data *td, u8 *id1, u8 *id2, u8 *id3)
        return 0;
 }
 
-static int taal_set_addr_mode(struct taal_data *td, u8 rotate, bool mirror)
-{
-       int r;
-       u8 mode;
-       int b5, b6, b7;
-
-       r = taal_dcs_read_1(td, MIPI_DCS_GET_ADDRESS_MODE, &mode);
-       if (r)
-               return r;
-
-       switch (rotate) {
-       default:
-       case 0:
-               b7 = 0;
-               b6 = 0;
-               b5 = 0;
-               break;
-       case 1:
-               b7 = 0;
-               b6 = 1;
-               b5 = 1;
-               break;
-       case 2:
-               b7 = 1;
-               b6 = 1;
-               b5 = 0;
-               break;
-       case 3:
-               b7 = 1;
-               b6 = 0;
-               b5 = 1;
-               break;
-       }
-
-       if (mirror)
-               b6 = !b6;
-
-       mode &= ~((1<<7) | (1<<6) | (1<<5));
-       mode |= (b7 << 7) | (b6 << 6) | (b5 << 5);
-
-       return taal_dcs_write_1(td, MIPI_DCS_SET_ADDRESS_MODE, mode);
-}
-
 static int taal_set_update_window(struct taal_data *td,
                u16 x, u16 y, u16 w, u16 h)
 {
@@ -515,15 +410,8 @@ static const struct backlight_ops taal_bl_ops = {
 static void taal_get_resolution(struct omap_dss_device *dssdev,
                u16 *xres, u16 *yres)
 {
-       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-
-       if (td->rotate == 0 || td->rotate == 2) {
-               *xres = dssdev->panel.timings.x_res;
-               *yres = dssdev->panel.timings.y_res;
-       } else {
-               *yres = dssdev->panel.timings.x_res;
-               *xres = dssdev->panel.timings.y_res;
-       }
+       *xres = dssdev->panel.timings.x_res;
+       *yres = dssdev->panel.timings.y_res;
 }
 
 static ssize_t taal_num_errors_show(struct device *dev,
@@ -845,17 +733,14 @@ static void taal_hw_reset(struct omap_dss_device *dssdev)
                return;
 
        gpio_set_value(td->reset_gpio, 1);
-       if (td->panel_config->reset_sequence.high)
-               udelay(td->panel_config->reset_sequence.high);
+       udelay(10);
        /* reset the panel */
        gpio_set_value(td->reset_gpio, 0);
        /* assert reset */
-       if (td->panel_config->reset_sequence.low)
-               udelay(td->panel_config->reset_sequence.low);
+       udelay(10);
        gpio_set_value(td->reset_gpio, 1);
        /* wait after releasing reset */
-       if (td->panel_config->sleep.hw_reset)
-               msleep(td->panel_config->sleep.hw_reset);
+       msleep(5);
 }
 
 static void taal_probe_pdata(struct taal_data *td,
@@ -881,8 +766,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
        struct backlight_properties props;
        struct taal_data *td;
        struct backlight_device *bldev = NULL;
-       int r, i;
-       const char *panel_name;
+       int r;
 
        dev_dbg(&dssdev->dev, "probe\n");
 
@@ -897,26 +781,13 @@ static int taal_probe(struct omap_dss_device *dssdev)
                const struct nokia_dsi_panel_data *pdata = dssdev->data;
 
                taal_probe_pdata(td, pdata);
-
-               panel_name = pdata->name;
        } else {
                return -ENODEV;
        }
 
-       if (panel_name == NULL)
-               return -EINVAL;
-
-       for (i = 0; i < ARRAY_SIZE(panel_configs); i++) {
-               if (strcmp(panel_name, panel_configs[i].name) == 0) {
-                       td->panel_config = &panel_configs[i];
-                       break;
-               }
-       }
-
-       if (!td->panel_config)
-               return -EINVAL;
-
-       dssdev->panel.timings = td->panel_config->timings;
+       dssdev->panel.timings.x_res = 864;
+       dssdev->panel.timings.y_res = 480;
+       dssdev->panel.timings.pixel_clock = DIV_ROUND_UP(864 * 480 * 60, 1000);
        dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888;
        dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
                OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
@@ -1049,6 +920,15 @@ static int taal_power_on(struct omap_dss_device *dssdev)
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
        u8 id1, id2, id3;
        int r;
+       struct omap_dss_dsi_config dsi_config = {
+               .mode = OMAP_DSS_DSI_CMD_MODE,
+               .pixel_format = OMAP_DSS_DSI_FMT_RGB888,
+               .timings = &dssdev->panel.timings,
+               .hs_clk_min = 150000000,
+               .hs_clk_max = 300000000,
+               .lp_clk_min = 7000000,
+               .lp_clk_max = 10000000,
+       };
 
        r = omapdss_dsi_configure_pins(dssdev, &td->pin_config);
        if (r) {
@@ -1056,14 +936,9 @@ static int taal_power_on(struct omap_dss_device *dssdev)
                goto err0;
        };
 
-       omapdss_dsi_set_size(dssdev, dssdev->panel.timings.x_res,
-               dssdev->panel.timings.y_res);
-       omapdss_dsi_set_pixel_format(dssdev, OMAP_DSS_DSI_FMT_RGB888);
-       omapdss_dsi_set_operation_mode(dssdev, OMAP_DSS_DSI_CMD_MODE);
-
-       r = omapdss_dsi_set_clocks(dssdev, 216000000, 10000000);
+       r = omapdss_dsi_set_config(dssdev, &dsi_config);
        if (r) {
-               dev_err(&dssdev->dev, "failed to set HS and LP clocks\n");
+               dev_err(&dssdev->dev, "failed to configure DSI\n");
                goto err0;
        }
 
@@ -1086,8 +961,7 @@ static int taal_power_on(struct omap_dss_device *dssdev)
                goto err;
 
        /* on early Taal revisions CABC is broken */
-       if (td->panel_config->type == PANEL_TAAL &&
-               (id2 == 0x00 || id2 == 0xff || id2 == 0x81))
+       if (id2 == 0x00 || id2 == 0xff || id2 == 0x81)
                td->cabc_broken = true;
 
        r = taal_dcs_write_1(td, DCS_BRIGHTNESS, 0xff);
@@ -1104,10 +978,6 @@ static int taal_power_on(struct omap_dss_device *dssdev)
        if (r)
                goto err;
 
-       r = taal_set_addr_mode(td, td->rotate, td->mirror);
-       if (r)
-               goto err;
-
        if (!td->cabc_broken) {
                r = taal_dcs_write_1(td, DCS_WRITE_CABC, td->cabc_mode);
                if (r)
@@ -1129,8 +999,8 @@ static int taal_power_on(struct omap_dss_device *dssdev)
        td->enabled = 1;
 
        if (!td->intro_printed) {
-               dev_info(&dssdev->dev, "%s panel revision %02x.%02x.%02x\n",
-                       td->panel_config->name, id1, id2, id3);
+               dev_info(&dssdev->dev, "panel revision %02x.%02x.%02x\n",
+                       id1, id2, id3);
                if (td->cabc_broken)
                        dev_info(&dssdev->dev,
                                        "old Taal version, CABC disabled\n");
@@ -1311,8 +1181,8 @@ static int taal_update(struct omap_dss_device *dssdev,
 
        /* XXX no need to send this every frame, but dsi break if not done */
        r = taal_set_update_window(td, 0, 0,
-                       td->panel_config->timings.x_res,
-                       td->panel_config->timings.y_res);
+                       dssdev->panel.timings.x_res,
+                       dssdev->panel.timings.y_res);
        if (r)
                goto err;
 
@@ -1365,8 +1235,8 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
        if (!gpio_is_valid(td->ext_te_gpio))
                omapdss_dsi_enable_te(dssdev, enable);
 
-       if (td->panel_config->sleep.enable_te)
-               msleep(td->panel_config->sleep.enable_te);
+       /* possible panel bug */
+       msleep(100);
 
        return r;
 }
@@ -1419,112 +1289,6 @@ static int taal_get_te(struct omap_dss_device *dssdev)
        return r;
 }
 
-static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
-{
-       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       u16 dw, dh;
-       int r;
-
-       dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
-
-       mutex_lock(&td->lock);
-
-       if (td->rotate == rotate)
-               goto end;
-
-       dsi_bus_lock(dssdev);
-
-       if (td->enabled) {
-               r = taal_wake_up(dssdev);
-               if (r)
-                       goto err;
-
-               r = taal_set_addr_mode(td, rotate, td->mirror);
-               if (r)
-                       goto err;
-       }
-
-       if (rotate == 0 || rotate == 2) {
-               dw = dssdev->panel.timings.x_res;
-               dh = dssdev->panel.timings.y_res;
-       } else {
-               dw = dssdev->panel.timings.y_res;
-               dh = dssdev->panel.timings.x_res;
-       }
-
-       omapdss_dsi_set_size(dssdev, dw, dh);
-
-       td->rotate = rotate;
-
-       dsi_bus_unlock(dssdev);
-end:
-       mutex_unlock(&td->lock);
-       return 0;
-err:
-       dsi_bus_unlock(dssdev);
-       mutex_unlock(&td->lock);
-       return r;
-}
-
-static u8 taal_get_rotate(struct omap_dss_device *dssdev)
-{
-       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       int r;
-
-       mutex_lock(&td->lock);
-       r = td->rotate;
-       mutex_unlock(&td->lock);
-
-       return r;
-}
-
-static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
-{
-       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       int r;
-
-       dev_dbg(&dssdev->dev, "mirror %d\n", enable);
-
-       mutex_lock(&td->lock);
-
-       if (td->mirror == enable)
-               goto end;
-
-       dsi_bus_lock(dssdev);
-       if (td->enabled) {
-               r = taal_wake_up(dssdev);
-               if (r)
-                       goto err;
-
-               r = taal_set_addr_mode(td, td->rotate, enable);
-               if (r)
-                       goto err;
-       }
-
-       td->mirror = enable;
-
-       dsi_bus_unlock(dssdev);
-end:
-       mutex_unlock(&td->lock);
-       return 0;
-err:
-       dsi_bus_unlock(dssdev);
-       mutex_unlock(&td->lock);
-       return r;
-}
-
-static bool taal_get_mirror(struct omap_dss_device *dssdev)
-{
-       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       int r;
-
-       mutex_lock(&td->lock);
-       r = td->mirror;
-       mutex_unlock(&td->lock);
-
-       return r;
-}
-
 static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
 {
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
@@ -1758,10 +1522,6 @@ static struct omap_dss_driver taal_driver = {
        .enable_te      = taal_enable_te,
        .get_te         = taal_get_te,
 
-       .set_rotate     = taal_rotate,
-       .get_rotate     = taal_get_rotate,
-       .set_mirror     = taal_mirror,
-       .get_mirror     = taal_get_mirror,
        .run_test       = taal_run_test,
        .memory_read    = taal_memory_read,
 
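The taal_probe() hunk above hardcodes the 864x480 panel mode and derives a nominal pixel clock from the visible area alone. A minimal standalone sketch of that arithmetic (DIV_ROUND_UP is re-declared locally here purely for illustration; blanking is ignored, as in the hunk):

#include <stdio.h>

/* Local re-declaration of the kernel's DIV_ROUND_UP, for illustration only. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long x_res = 864, y_res = 480, refresh = 60;

	/* pixel_clock is stored in kHz, hence the division by 1000 */
	unsigned long pixel_clock = DIV_ROUND_UP(x_res * y_res * refresh, 1000);

	printf("%lux%lu@%luHz -> pixel_clock = %lu kHz\n",
	       x_res, y_res, refresh, pixel_clock);	/* prints 24884 kHz */
	return 0;
}
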
index 8281baafe1efd39127a628c70ecb3b720034d6f1..a1dba868cef107bf4158f7f11b35e510b23268fe 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/gpio.h>
 #include <drm/drm_edid.h>
 
-#include <video/omap-panel-tfp410.h>
+#include <video/omap-panel-data.h>
 
 static const struct omap_video_timings tfp410_default_timings = {
        .x_res          = 640,
index 6b6643911d296c707e7eaa29c30412081378cc5f..abf2bc4a18ab932593f01276fe9bee3966b69bc9 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 
 #include <video/omapdss.h>
+#include <video/omap-panel-data.h>
 
 #define TPO_R02_MODE(x)                ((x) & 7)
 #define TPO_R02_MODE_800x480   7
@@ -63,6 +64,9 @@ struct tpo_td043_device {
        u32 power_on_resume:1;
 };
 
+/* used to pass spi_device from SPI to DSS portion of the driver */
+static struct tpo_td043_device *g_tpo_td043;
+
 static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
 {
        struct spi_message      m;
@@ -275,9 +279,14 @@ static const struct omap_video_timings tpo_td043_timings = {
        .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
 };
 
+static inline struct panel_tpo_td043_data
+*get_panel_data(const struct omap_dss_device *dssdev)
+{
+       return (struct panel_tpo_td043_data *) dssdev->data;
+}
+
 static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
 {
-       int nreset_gpio = tpo_td043->nreset_gpio;
        int r;
 
        if (tpo_td043->powered_on)
@@ -290,8 +299,8 @@ static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
        /* wait for panel to stabilize */
        msleep(160);
 
-       if (gpio_is_valid(nreset_gpio))
-               gpio_set_value(nreset_gpio, 1);
+       if (gpio_is_valid(tpo_td043->nreset_gpio))
+               gpio_set_value(tpo_td043->nreset_gpio, 1);
 
        tpo_td043_write(tpo_td043->spi, 2,
                        TPO_R02_MODE(tpo_td043->mode) | TPO_R02_NCLK_RISING);
@@ -308,16 +317,14 @@ static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
 
 static void tpo_td043_power_off(struct tpo_td043_device *tpo_td043)
 {
-       int nreset_gpio = tpo_td043->nreset_gpio;
-
        if (!tpo_td043->powered_on)
                return;
 
        tpo_td043_write(tpo_td043->spi, 3,
                        TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
 
-       if (gpio_is_valid(nreset_gpio))
-               gpio_set_value(nreset_gpio, 0);
+       if (gpio_is_valid(tpo_td043->nreset_gpio))
+               gpio_set_value(tpo_td043->nreset_gpio, 0);
 
        /* wait for at least 2 vsyncs before cutting off power */
        msleep(50);
@@ -344,12 +351,6 @@ static int tpo_td043_enable_dss(struct omap_dss_device *dssdev)
        if (r)
                goto err0;
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto err1;
-       }
-
        /*
         * If we are resuming from system suspend, SPI clocks might not be
         * enabled yet, so we'll program the LCD from SPI PM resume callback.
@@ -376,9 +377,6 @@ static void tpo_td043_disable_dss(struct omap_dss_device *dssdev)
        if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return;
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
-
        omapdss_dpi_display_disable(dssdev);
 
        if (!tpo_td043->spi_suspended)
@@ -403,8 +401,8 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
 
 static int tpo_td043_probe(struct omap_dss_device *dssdev)
 {
-       struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
-       int nreset_gpio = dssdev->reset_gpio;
+       struct tpo_td043_device *tpo_td043 = g_tpo_td043;
+       struct panel_tpo_td043_data *pdata = get_panel_data(dssdev);
        int ret = 0;
 
        dev_dbg(&dssdev->dev, "probe\n");
@@ -414,6 +412,11 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
                return -ENODEV;
        }
 
+       if (!pdata)
+               return -EINVAL;
+
+       tpo_td043->nreset_gpio = pdata->nreset_gpio;
+
        dssdev->panel.timings = tpo_td043_timings;
        dssdev->ctrl.pixel_size = 24;
 
@@ -427,9 +430,10 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
                goto fail_regulator;
        }
 
-       if (gpio_is_valid(nreset_gpio)) {
-               ret = gpio_request_one(nreset_gpio, GPIOF_OUT_INIT_LOW,
-                                       "lcd reset");
+       if (gpio_is_valid(tpo_td043->nreset_gpio)) {
+               ret = devm_gpio_request_one(&dssdev->dev,
+                               tpo_td043->nreset_gpio, GPIOF_OUT_INIT_LOW,
+                               "lcd reset");
                if (ret < 0) {
                        dev_err(&dssdev->dev, "couldn't request reset GPIO\n");
                        goto fail_gpio_req;
@@ -440,6 +444,8 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
        if (ret)
                dev_warn(&dssdev->dev, "failed to create sysfs files\n");
 
+       dev_set_drvdata(&dssdev->dev, tpo_td043);
+
        return 0;
 
 fail_gpio_req:
@@ -452,14 +458,11 @@ fail_regulator:
 static void tpo_td043_remove(struct omap_dss_device *dssdev)
 {
        struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
-       int nreset_gpio = dssdev->reset_gpio;
 
        dev_dbg(&dssdev->dev, "remove\n");
 
        sysfs_remove_group(&dssdev->dev.kobj, &tpo_td043_attr_group);
        regulator_put(tpo_td043->vcc_reg);
-       if (gpio_is_valid(nreset_gpio))
-               gpio_free(nreset_gpio);
 }
 
 static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
@@ -505,6 +508,9 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
                return -ENODEV;
        }
 
+       if (g_tpo_td043 != NULL)
+               return -EBUSY;
+
        spi->bits_per_word = 16;
        spi->mode = SPI_MODE_0;
 
@@ -519,9 +525,8 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
                return -ENOMEM;
 
        tpo_td043->spi = spi;
-       tpo_td043->nreset_gpio = dssdev->reset_gpio;
        dev_set_drvdata(&spi->dev, tpo_td043);
-       dev_set_drvdata(&dssdev->dev, tpo_td043);
+       g_tpo_td043 = tpo_td043;
 
        omap_dss_register_driver(&tpo_td043_driver);
 
@@ -534,6 +539,7 @@ static int tpo_td043_spi_remove(struct spi_device *spi)
 
        omap_dss_unregister_driver(&tpo_td043_driver);
        kfree(tpo_td043);
+       g_tpo_td043 = NULL;
 
        return 0;
 }
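
The tpo_td043 changes above hand the SPI-probed device to the DSS side through a module-level pointer (g_tpo_td043), with an -EBUSY guard against a second SPI instance. A standalone sketch of that handoff pattern, with made-up structure and function names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct panel_device { int nreset_gpio; };

/* Module-level handoff slot, mirroring g_tpo_td043 in the hunk above. */
static struct panel_device *g_panel;

/* SPI-side probe: allocate the device and publish it for the DSS side. */
static int spi_side_probe(void)
{
	if (g_panel)		/* only a single panel instance is supported */
		return -EBUSY;

	g_panel = calloc(1, sizeof(*g_panel));
	return g_panel ? 0 : -ENOMEM;
}

/* DSS-side probe: runs later and picks the instance up from the slot. */
static int dss_side_probe(int nreset_gpio)
{
	if (!g_panel)
		return -ENODEV;

	g_panel->nreset_gpio = nreset_gpio;
	return 0;
}

int main(void)
{
	printf("spi probe: %d\n", spi_side_probe());	/* 0 */
	printf("spi probe: %d\n", spi_side_probe());	/* -EBUSY */
	printf("dss probe: %d\n", dss_side_probe(23));	/* 0 */
	return 0;
}
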
index d446bdfc4c821fc4a26caf401716cc843cef370a..a4b356a9780d305f37673743f40eb5cbe61942a9 100644 (file)
@@ -435,20 +435,27 @@ static inline struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_man
 static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
 {
        unsigned long timeout = msecs_to_jiffies(500);
-       struct omap_dss_device *dssdev = mgr->get_device(mgr);
        u32 irq;
        int r;
 
+       if (mgr->output == NULL)
+               return -ENODEV;
+
        r = dispc_runtime_get();
        if (r)
                return r;
 
-       if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
+       switch (mgr->output->id) {
+       case OMAP_DSS_OUTPUT_VENC:
                irq = DISPC_IRQ_EVSYNC_ODD;
-       else if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
+               break;
+       case OMAP_DSS_OUTPUT_HDMI:
                irq = DISPC_IRQ_EVSYNC_EVEN;
-       else
+               break;
+       default:
                irq = dispc_mgr_get_vsync_irq(mgr->id);
+               break;
+       }
 
        r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
 
index f8779d4750ba63f98908492f2b11b9bfe7cf4ea8..60cc6fee654815444c21d46909351f0e15af6b10 100644 (file)
@@ -181,10 +181,7 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
        d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
                        write, &dss_debug_fops);
 
-       if (IS_ERR(d))
-               return PTR_ERR(d);
-
-       return 0;
+       return PTR_RET(d);
 }
 #else /* CONFIG_OMAP2_DSS_DEBUGFS */
 static inline int dss_initialize_debugfs(void)
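
The dss_debugfs_create_file() hunk above folds the IS_ERR()/PTR_ERR() check into PTR_RET(), which returns the encoded errno for an error pointer and 0 otherwise. A standalone sketch of the equivalent logic, with the error-pointer helpers re-implemented locally as userspace stand-ins (illustrative only, not the kernel definitions):

#include <stdio.h>

#define MAX_ERRNO 4095

/* Minimal userspace stand-ins for the kernel's error-pointer helpers. */
static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err(const void *ptr)
{
	return (long)ptr;
}

/* Equivalent of PTR_RET(): errno if the pointer encodes an error, else 0. */
static int ptr_ret(const void *ptr)
{
	return is_err(ptr) ? (int)ptr_err(ptr) : 0;
}

int main(void)
{
	void *ok = &(int){0};
	void *bad = (void *)(long)-12;	/* an errno encoded as a pointer */

	printf("ok  -> %d\n", ptr_ret(ok));	/* 0 */
	printf("bad -> %d\n", ptr_ret(bad));	/* -12 */
	return 0;
}
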
index 05ff2b91d9e8a49ee1e11c3c957b634eb4698393..b33b0169bb3b3f85f5722c52d8568cf0ad62ab4e 100644 (file)
@@ -69,6 +69,8 @@ struct dispc_features {
        u8 mgr_height_start;
        u16 mgr_width_max;
        u16 mgr_height_max;
+       unsigned long max_lcd_pclk;
+       unsigned long max_tv_pclk;
        int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
                const struct omap_video_timings *mgr_timings,
                u16 width, u16 height, u16 out_width, u16 out_height,
@@ -85,6 +87,9 @@ struct dispc_features {
 
        /* no DISPC_IRQ_FRAMEDONETV on this SoC */
        bool no_framedone_tv:1;
+
+       /* revert to the OMAP4 mechanism of DISPC Smart Standby operation */
+       bool mstandby_workaround:1;
 };
 
 #define DISPC_MAX_NR_FIFOS 5
@@ -97,6 +102,8 @@ static struct {
 
        int irq;
 
+       unsigned long core_clk_rate;
+
        u32 fifo_size[DISPC_MAX_NR_FIFOS];
        /* maps which plane is using a fifo. fifo-id -> plane-id */
        int fifo_assignment[DISPC_MAX_NR_FIFOS];
@@ -1584,6 +1591,7 @@ static void dispc_ovl_set_scaling(enum omap_plane plane,
 }
 
 static void dispc_ovl_set_rotation_attrs(enum omap_plane plane, u8 rotation,
+               enum omap_dss_rotation_type rotation_type,
                bool mirroring, enum omap_color_mode color_mode)
 {
        bool row_repeat = false;
@@ -1634,6 +1642,15 @@ static void dispc_ovl_set_rotation_attrs(enum omap_plane plane, u8 rotation,
        if (dss_has_feature(FEAT_ROWREPEATENABLE))
                REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
                        row_repeat ? 1 : 0, 18, 18);
+
+       if (color_mode == OMAP_DSS_COLOR_NV12) {
+               bool doublestride = (rotation_type == OMAP_DSS_ROT_TILER) &&
+                                       (rotation == OMAP_DSS_ROT_0 ||
+                                       rotation == OMAP_DSS_ROT_180);
+               /* DOUBLESTRIDE */
+               REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), doublestride, 22, 22);
+       }
+
 }
 
 static int color_mode_to_bpp(enum omap_color_mode color_mode)
@@ -2512,7 +2529,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
                dispc_ovl_set_vid_color_conv(plane, cconv);
        }
 
-       dispc_ovl_set_rotation_attrs(plane, rotation, mirror, color_mode);
+       dispc_ovl_set_rotation_attrs(plane, rotation, rotation_type, mirror,
+                       color_mode);
 
        dispc_ovl_set_zorder(plane, caps, zorder);
        dispc_ovl_set_pre_mult_alpha(plane, caps, pre_mult_alpha);
@@ -2823,6 +2841,15 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
        return true;
 }
 
+static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
+               unsigned long pclk)
+{
+       if (dss_mgr_is_lcd(channel))
+               return pclk <= dispc.feat->max_lcd_pclk ? true : false;
+       else
+               return pclk <= dispc.feat->max_tv_pclk ? true : false;
+}
+
 bool dispc_mgr_timings_ok(enum omap_channel channel,
                const struct omap_video_timings *timings)
 {
@@ -2830,11 +2857,13 @@ bool dispc_mgr_timings_ok(enum omap_channel channel,
 
        timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
 
-       if (dss_mgr_is_lcd(channel))
-               timings_ok =  timings_ok && _dispc_lcd_timings_ok(timings->hsw,
-                                               timings->hfp, timings->hbp,
-                                               timings->vsw, timings->vfp,
-                                               timings->vbp);
+       timings_ok &= _dispc_mgr_pclk_ok(channel, timings->pixel_clock * 1000);
+
+       if (dss_mgr_is_lcd(channel)) {
+               timings_ok &= _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
+                               timings->hbp, timings->vsw, timings->vfp,
+                               timings->vbp);
+       }
 
        return timings_ok;
 }
@@ -2951,6 +2980,10 @@ static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
 
        dispc_write_reg(DISPC_DIVISORo(channel),
                        FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
+
+       if (dss_has_feature(FEAT_CORE_CLK_DIV) == false &&
+                       channel == OMAP_DSS_CHANNEL_LCD)
+               dispc.core_clk_rate = dispc_fclk_rate() / lck_div;
 }
 
 static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
@@ -3056,15 +3089,7 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
 
 unsigned long dispc_core_clk_rate(void)
 {
-       int lcd;
-       unsigned long fclk = dispc_fclk_rate();
-
-       if (dss_has_feature(FEAT_CORE_CLK_DIV))
-               lcd = REG_GET(DISPC_DIVISOR, 23, 16);
-       else
-               lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16);
-
-       return fclk / lcd;
+       return dispc.core_clk_rate;
 }
 
 static unsigned long dispc_plane_pclk_rate(enum omap_plane plane)
@@ -3313,67 +3338,79 @@ static void dispc_dump_regs(struct seq_file *s)
 #undef DUMPREG
 }
 
-/* with fck as input clock rate, find dispc dividers that produce req_pck */
-void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck,
+/* calculate clock rates using dividers in cinfo */
+int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
                struct dispc_clock_info *cinfo)
 {
-       u16 pcd_min, pcd_max;
-       unsigned long best_pck;
-       u16 best_ld, cur_ld;
-       u16 best_pd, cur_pd;
+       if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
+               return -EINVAL;
+       if (cinfo->pck_div < 1 || cinfo->pck_div > 255)
+               return -EINVAL;
 
-       pcd_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
-       pcd_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);
+       cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
+       cinfo->pck = cinfo->lck / cinfo->pck_div;
 
-       best_pck = 0;
-       best_ld = 0;
-       best_pd = 0;
+       return 0;
+}
 
-       for (cur_ld = 1; cur_ld <= 255; ++cur_ld) {
-               unsigned long lck = fck / cur_ld;
+bool dispc_div_calc(unsigned long dispc,
+               unsigned long pck_min, unsigned long pck_max,
+               dispc_div_calc_func func, void *data)
+{
+       int lckd, lckd_start, lckd_stop;
+       int pckd, pckd_start, pckd_stop;
+       unsigned long pck, lck;
+       unsigned long lck_max;
+       unsigned long pckd_hw_min, pckd_hw_max;
+       unsigned min_fck_per_pck;
+       unsigned long fck;
 
-               for (cur_pd = pcd_min; cur_pd <= pcd_max; ++cur_pd) {
-                       unsigned long pck = lck / cur_pd;
-                       long old_delta = abs(best_pck - req_pck);
-                       long new_delta = abs(pck - req_pck);
+#ifdef CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK
+       min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
+#else
+       min_fck_per_pck = 0;
+#endif
 
-                       if (best_pck == 0 || new_delta < old_delta) {
-                               best_pck = pck;
-                               best_ld = cur_ld;
-                               best_pd = cur_pd;
+       pckd_hw_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
+       pckd_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);
 
-                               if (pck == req_pck)
-                                       goto found;
-                       }
+       lck_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
 
-                       if (pck < req_pck)
-                               break;
-               }
+       pck_min = pck_min ? pck_min : 1;
+       pck_max = pck_max ? pck_max : ULONG_MAX;
 
-               if (lck / pcd_min < req_pck)
-                       break;
-       }
+       lckd_start = max(DIV_ROUND_UP(dispc, lck_max), 1ul);
+       lckd_stop = min(dispc / pck_min, 255ul);
 
-found:
-       cinfo->lck_div = best_ld;
-       cinfo->pck_div = best_pd;
-       cinfo->lck = fck / cinfo->lck_div;
-       cinfo->pck = cinfo->lck / cinfo->pck_div;
-}
+       for (lckd = lckd_start; lckd <= lckd_stop; ++lckd) {
+               lck = dispc / lckd;
 
-/* calculate clock rates using dividers in cinfo */
-int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
-               struct dispc_clock_info *cinfo)
-{
-       if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
-               return -EINVAL;
-       if (cinfo->pck_div < 1 || cinfo->pck_div > 255)
-               return -EINVAL;
+               pckd_start = max(DIV_ROUND_UP(lck, pck_max), pckd_hw_min);
+               pckd_stop = min(lck / pck_min, pckd_hw_max);
 
-       cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
-       cinfo->pck = cinfo->lck / cinfo->pck_div;
+               for (pckd = pckd_start; pckd <= pckd_stop; ++pckd) {
+                       pck = lck / pckd;
 
-       return 0;
+                       /*
+                        * For OMAP2/3 the DISPC fclk is the same as LCD's logic
+                        * clock, which means we're configuring DISPC fclk here
+                        * also. Thus we need to use the calculated lck. For
+                        * OMAP4+ the DISPC fclk is a separate clock.
+                        */
+                       if (dss_has_feature(FEAT_CORE_CLK_DIV))
+                               fck = dispc_core_clk_rate();
+                       else
+                               fck = lck;
+
+                       if (fck < pck * min_fck_per_pck)
+                               continue;
+
+                       if (func(lckd, pckd, lck, pck, data))
+                               return true;
+               }
+       }
+
+       return false;
 }
 
 void dispc_mgr_set_clock_div(enum omap_channel channel,
@@ -3451,6 +3488,8 @@ static void _omap_dispc_initial_config(void)
                l = FLD_MOD(l, 1, 0, 0);
                l = FLD_MOD(l, 1, 23, 16);
                dispc_write_reg(DISPC_DIVISOR, l);
+
+               dispc.core_clk_rate = dispc_fclk_rate();
        }
 
        /* FUNCGATED */
@@ -3466,6 +3505,9 @@ static void _omap_dispc_initial_config(void)
        dispc_configure_burst_sizes();
 
        dispc_ovl_enable_zorder_planes();
+
+       if (dispc.feat->mstandby_workaround)
+               REG_FLD_MOD(DISPC_MSTANDBY_CTRL, 1, 0, 0);
 }
 
 static const struct dispc_features omap24xx_dispc_feats __initconst = {
@@ -3479,6 +3521,7 @@ static const struct dispc_features omap24xx_dispc_feats __initconst = {
        .mgr_height_start       =       26,
        .mgr_width_max          =       2048,
        .mgr_height_max         =       2048,
+       .max_lcd_pclk           =       66500000,
        .calc_scaling           =       dispc_ovl_calc_scaling_24xx,
        .calc_core_clk          =       calc_core_clk_24xx,
        .num_fifos              =       3,
@@ -3496,6 +3539,8 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
        .mgr_height_start       =       26,
        .mgr_width_max          =       2048,
        .mgr_height_max         =       2048,
+       .max_lcd_pclk           =       173000000,
+       .max_tv_pclk            =       59000000,
        .calc_scaling           =       dispc_ovl_calc_scaling_34xx,
        .calc_core_clk          =       calc_core_clk_34xx,
        .num_fifos              =       3,
@@ -3513,6 +3558,8 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
        .mgr_height_start       =       26,
        .mgr_width_max          =       2048,
        .mgr_height_max         =       2048,
+       .max_lcd_pclk           =       173000000,
+       .max_tv_pclk            =       59000000,
        .calc_scaling           =       dispc_ovl_calc_scaling_34xx,
        .calc_core_clk          =       calc_core_clk_34xx,
        .num_fifos              =       3,
@@ -3530,6 +3577,8 @@ static const struct dispc_features omap44xx_dispc_feats __initconst = {
        .mgr_height_start       =       26,
        .mgr_width_max          =       2048,
        .mgr_height_max         =       2048,
+       .max_lcd_pclk           =       170000000,
+       .max_tv_pclk            =       185625000,
        .calc_scaling           =       dispc_ovl_calc_scaling_44xx,
        .calc_core_clk          =       calc_core_clk_44xx,
        .num_fifos              =       5,
@@ -3547,10 +3596,13 @@ static const struct dispc_features omap54xx_dispc_feats __initconst = {
        .mgr_height_start       =       27,
        .mgr_width_max          =       4096,
        .mgr_height_max         =       4096,
+       .max_lcd_pclk           =       170000000,
+       .max_tv_pclk            =       186000000,
        .calc_scaling           =       dispc_ovl_calc_scaling_44xx,
        .calc_core_clk          =       calc_core_clk_44xx,
        .num_fifos              =       5,
        .gfx_fifo_workaround    =       true,
+       .mstandby_workaround    =       true,
 };
 
 static int __init dispc_init_features(struct platform_device *pdev)
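
The dispc.c changes above replace the old closest-match divider search with dispc_div_calc(), which walks every (lck_div, pck_div) pair whose pixel clock falls in the requested window and hands each candidate to a caller-supplied callback, stopping at the first one the callback accepts. A simplified standalone sketch of that pattern, assuming fixed 255 divider limits and a trivial accept-first callback (the 96 MHz clock and 23-26 MHz window are made-up example values):

#include <stdbool.h>
#include <stdio.h>

typedef bool (*div_calc_func)(int lckd, int pckd,
			      unsigned long lck, unsigned long pck, void *data);

/*
 * Walk all divider pairs that keep pck within [pck_min, pck_max] and let
 * the callback claim the first acceptable one, mirroring dispc_div_calc().
 * The 255 limits stand in for the hardware divider fields.
 */
static bool div_calc(unsigned long fck, unsigned long pck_min,
		     unsigned long pck_max, div_calc_func func, void *data)
{
	for (int lckd = 1; lckd <= 255; ++lckd) {
		unsigned long lck = fck / lckd;

		for (int pckd = 1; pckd <= 255; ++pckd) {
			unsigned long pck = lck / pckd;

			if (pck < pck_min || pck > pck_max)
				continue;

			if (func(lckd, pckd, lck, pck, data))
				return true;
		}
	}

	return false;
}

struct result { int lckd, pckd; unsigned long pck; };

static bool accept_first(int lckd, int pckd, unsigned long lck,
			 unsigned long pck, void *data)
{
	struct result *r = data;

	(void)lck;
	r->lckd = lckd;
	r->pckd = pckd;
	r->pck = pck;
	return true;	/* take the first in-range candidate */
}

int main(void)
{
	struct result r;

	/* ~96 MHz functional clock, panel wants roughly 23-26 MHz pixel clock */
	if (div_calc(96000000UL, 23000000UL, 26000000UL, accept_first, &r))
		printf("lckd=%d pckd=%d pck=%lu Hz\n", r.lckd, r.pckd, r.pck);
	return 0;
}
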
index 222363c6e623f794402f68b98d419f5fc0d1f869..de4863d21ab78b7683d490628ef9bc5a7f1f335b 100644 (file)
@@ -39,6 +39,7 @@
 #define DISPC_GLOBAL_BUFFER            0x0800
 #define DISPC_CONTROL3                  0x0848
 #define DISPC_CONFIG3                   0x084C
+#define DISPC_MSTANDBY_CTRL            0x0858
 
 /* DISPC overlay registers */
 #define DISPC_OVL_BA0(n)               (DISPC_OVL_BASE(n) + \
index 4af136a04e53ce4fafc67a3435a0121062e53674..e93c4debea7fc46297a973bac7a3a7149ea5ef76 100644 (file)
@@ -63,15 +63,29 @@ static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
        case OMAPDSS_VER_OMAP3630:
        case OMAPDSS_VER_AM35xx:
                return NULL;
-       default:
-               break;
-       }
 
-       switch (channel) {
-       case OMAP_DSS_CHANNEL_LCD:
-               return dsi_get_dsidev_from_id(0);
-       case OMAP_DSS_CHANNEL_LCD2:
-               return dsi_get_dsidev_from_id(1);
+       case OMAPDSS_VER_OMAP4430_ES1:
+       case OMAPDSS_VER_OMAP4430_ES2:
+       case OMAPDSS_VER_OMAP4:
+               switch (channel) {
+               case OMAP_DSS_CHANNEL_LCD:
+                       return dsi_get_dsidev_from_id(0);
+               case OMAP_DSS_CHANNEL_LCD2:
+                       return dsi_get_dsidev_from_id(1);
+               default:
+                       return NULL;
+               }
+
+       case OMAPDSS_VER_OMAP5:
+               switch (channel) {
+               case OMAP_DSS_CHANNEL_LCD:
+                       return dsi_get_dsidev_from_id(0);
+               case OMAP_DSS_CHANNEL_LCD3:
+                       return dsi_get_dsidev_from_id(1);
+               default:
+                       return NULL;
+               }
+
        default:
                return NULL;
        }
@@ -91,75 +105,211 @@ static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
        }
 }
 
-static int dpi_set_dsi_clk(struct omap_dss_device *dssdev,
+struct dpi_clk_calc_ctx {
+       struct platform_device *dsidev;
+
+       /* inputs */
+
+       unsigned long pck_min, pck_max;
+
+       /* outputs */
+
+       struct dsi_clock_info dsi_cinfo;
+       struct dss_clock_info dss_cinfo;
+       struct dispc_clock_info dispc_cinfo;
+};
+
+static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+               unsigned long pck, void *data)
+{
+       struct dpi_clk_calc_ctx *ctx = data;
+
+       /*
+        * Odd dividers give us an uneven duty cycle, causing problems when the
+        * signal is level shifted. So skip all odd dividers when the pixel
+        * clock is on the higher side.
+        */
+       if (ctx->pck_min >= 1000000) {
+               if (lckd > 1 && lckd % 2 != 0)
+                       return false;
+
+               if (pckd > 1 && pckd % 2 != 0)
+                       return false;
+       }
+
+       ctx->dispc_cinfo.lck_div = lckd;
+       ctx->dispc_cinfo.pck_div = pckd;
+       ctx->dispc_cinfo.lck = lck;
+       ctx->dispc_cinfo.pck = pck;
+
+       return true;
+}
+
+
+static bool dpi_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+               void *data)
+{
+       struct dpi_clk_calc_ctx *ctx = data;
+
+       /*
+        * Odd dividers give us an uneven duty cycle, causing problems when the
+        * signal is level shifted. So skip all odd dividers when the pixel
+        * clock is on the higher side.
+        */
+       if (regm_dispc > 1 && regm_dispc % 2 != 0 && ctx->pck_min >= 1000000)
+               return false;
+
+       ctx->dsi_cinfo.regm_dispc = regm_dispc;
+       ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
+
+       return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
+                       dpi_calc_dispc_cb, ctx);
+}
+
+
+static bool dpi_calc_pll_cb(int regn, int regm, unsigned long fint,
+               unsigned long pll,
+               void *data)
+{
+       struct dpi_clk_calc_ctx *ctx = data;
+
+       ctx->dsi_cinfo.regn = regn;
+       ctx->dsi_cinfo.regm = regm;
+       ctx->dsi_cinfo.fint = fint;
+       ctx->dsi_cinfo.clkin4ddr = pll;
+
+       return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->pck_min,
+                       dpi_calc_hsdiv_cb, ctx);
+}
+
+static bool dpi_calc_dss_cb(int fckd, unsigned long fck, void *data)
+{
+       struct dpi_clk_calc_ctx *ctx = data;
+
+       ctx->dss_cinfo.fck = fck;
+       ctx->dss_cinfo.fck_div = fckd;
+
+       return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
+                       dpi_calc_dispc_cb, ctx);
+}
+
+static bool dpi_dsi_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
+{
+       unsigned long clkin;
+       unsigned long pll_min, pll_max;
+
+       clkin = dsi_get_pll_clkin(dpi.dsidev);
+
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->dsidev = dpi.dsidev;
+       ctx->pck_min = pck - 1000;
+       ctx->pck_max = pck + 1000;
+       ctx->dsi_cinfo.clkin = clkin;
+
+       pll_min = 0;
+       pll_max = 0;
+
+       return dsi_pll_calc(dpi.dsidev, clkin,
+                       pll_min, pll_max,
+                       dpi_calc_pll_cb, ctx);
+}
+
+static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
+{
+       int i;
+
+       /*
+        * DSS fck gives us very few possibilities, so finding a good pixel
+        * clock may not be possible. We try multiple times to find the clock,
+        * each time widening the pixel clock range we look for, up to
+        * +/- ~15MHz.
+        */
+
+       for (i = 0; i < 25; ++i) {
+               bool ok;
+
+               memset(ctx, 0, sizeof(*ctx));
+               if (pck > 1000 * i * i * i)
+                       ctx->pck_min = max(pck - 1000 * i * i * i, 0lu);
+               else
+                       ctx->pck_min = 0;
+               ctx->pck_max = pck + 1000 * i * i * i;
+
+               ok = dss_div_calc(ctx->pck_min, dpi_calc_dss_cb, ctx);
+               if (ok)
+                       return ok;
+       }
+
+       return false;
+}
+
+
+
+static int dpi_set_dsi_clk(enum omap_channel channel,
                unsigned long pck_req, unsigned long *fck, int *lck_div,
                int *pck_div)
 {
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
-       struct dsi_clock_info dsi_cinfo;
-       struct dispc_clock_info dispc_cinfo;
+       struct dpi_clk_calc_ctx ctx;
        int r;
+       bool ok;
 
-       r = dsi_pll_calc_clock_div_pck(dpi.dsidev, pck_req, &dsi_cinfo,
-                       &dispc_cinfo);
-       if (r)
-               return r;
+       ok = dpi_dsi_clk_calc(pck_req, &ctx);
+       if (!ok)
+               return -EINVAL;
 
-       r = dsi_pll_set_clock_div(dpi.dsidev, &dsi_cinfo);
+       r = dsi_pll_set_clock_div(dpi.dsidev, &ctx.dsi_cinfo);
        if (r)
                return r;
 
-       dss_select_lcd_clk_source(mgr->id,
-                       dpi_get_alt_clk_src(mgr->id));
+       dss_select_lcd_clk_source(channel,
+                       dpi_get_alt_clk_src(channel));
 
-       dpi.mgr_config.clock_info = dispc_cinfo;
+       dpi.mgr_config.clock_info = ctx.dispc_cinfo;
 
-       *fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
-       *lck_div = dispc_cinfo.lck_div;
-       *pck_div = dispc_cinfo.pck_div;
+       *fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
+       *lck_div = ctx.dispc_cinfo.lck_div;
+       *pck_div = ctx.dispc_cinfo.pck_div;
 
        return 0;
 }
 
-static int dpi_set_dispc_clk(struct omap_dss_device *dssdev,
-               unsigned long pck_req, unsigned long *fck, int *lck_div,
-               int *pck_div)
+static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
+               int *lck_div, int *pck_div)
 {
-       struct dss_clock_info dss_cinfo;
-       struct dispc_clock_info dispc_cinfo;
+       struct dpi_clk_calc_ctx ctx;
        int r;
+       bool ok;
 
-       r = dss_calc_clock_div(pck_req, &dss_cinfo, &dispc_cinfo);
-       if (r)
-               return r;
+       ok = dpi_dss_clk_calc(pck_req, &ctx);
+       if (!ok)
+               return -EINVAL;
 
-       r = dss_set_clock_div(&dss_cinfo);
+       r = dss_set_clock_div(&ctx.dss_cinfo);
        if (r)
                return r;
 
-       dpi.mgr_config.clock_info = dispc_cinfo;
+       dpi.mgr_config.clock_info = ctx.dispc_cinfo;
 
-       *fck = dss_cinfo.fck;
-       *lck_div = dispc_cinfo.lck_div;
-       *pck_div = dispc_cinfo.pck_div;
+       *fck = ctx.dss_cinfo.fck;
+       *lck_div = ctx.dispc_cinfo.lck_div;
+       *pck_div = ctx.dispc_cinfo.pck_div;
 
        return 0;
 }
 
-static int dpi_set_mode(struct omap_dss_device *dssdev)
+static int dpi_set_mode(struct omap_overlay_manager *mgr)
 {
        struct omap_video_timings *t = &dpi.timings;
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
        int lck_div = 0, pck_div = 0;
        unsigned long fck = 0;
        unsigned long pck;
        int r = 0;
 
        if (dpi.dsidev)
-               r = dpi_set_dsi_clk(dssdev, t->pixel_clock * 1000, &fck,
+               r = dpi_set_dsi_clk(mgr->id, t->pixel_clock * 1000, &fck,
                                &lck_div, &pck_div);
        else
-               r = dpi_set_dispc_clk(dssdev, t->pixel_clock * 1000, &fck,
+               r = dpi_set_dispc_clk(t->pixel_clock * 1000, &fck,
                                &lck_div, &pck_div);
        if (r)
                return r;
@@ -179,10 +329,8 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static void dpi_config_lcd_manager(struct omap_dss_device *dssdev)
+static void dpi_config_lcd_manager(struct omap_overlay_manager *mgr)
 {
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
-
        dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
 
        dpi.mgr_config.stallmode = false;
@@ -197,7 +345,7 @@ static void dpi_config_lcd_manager(struct omap_dss_device *dssdev)
 
 int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
 {
-       struct omap_dss_output *out = dssdev->output;
+       struct omap_dss_output *out = &dpi.output;
        int r;
 
        mutex_lock(&dpi.lock);
@@ -230,7 +378,7 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
        if (r)
                goto err_get_dispc;
 
-       r = dss_dpi_select_source(dssdev->channel);
+       r = dss_dpi_select_source(out->manager->id);
        if (r)
                goto err_src_sel;
 
@@ -244,11 +392,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
                        goto err_dsi_pll_init;
        }
 
-       r = dpi_set_mode(dssdev);
+       r = dpi_set_mode(out->manager);
        if (r)
                goto err_set_mode;
 
-       dpi_config_lcd_manager(dssdev);
+       dpi_config_lcd_manager(out->manager);
 
        mdelay(2);
 
@@ -285,7 +433,7 @@ EXPORT_SYMBOL(omapdss_dpi_display_enable);
 
 void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
 {
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
+       struct omap_overlay_manager *mgr = dpi.output.manager;
 
        mutex_lock(&dpi.lock);
 
@@ -324,12 +472,12 @@ EXPORT_SYMBOL(omapdss_dpi_set_timings);
 int dpi_check_timings(struct omap_dss_device *dssdev,
                        struct omap_video_timings *timings)
 {
-       int r;
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
+       struct omap_overlay_manager *mgr = dpi.output.manager;
        int lck_div, pck_div;
        unsigned long fck;
        unsigned long pck;
-       struct dispc_clock_info dispc_cinfo;
+       struct dpi_clk_calc_ctx ctx;
+       bool ok;
 
        if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
                return -EINVAL;
@@ -338,28 +486,21 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
                return -EINVAL;
 
        if (dpi.dsidev) {
-               struct dsi_clock_info dsi_cinfo;
-               r = dsi_pll_calc_clock_div_pck(dpi.dsidev,
-                               timings->pixel_clock * 1000,
-                               &dsi_cinfo, &dispc_cinfo);
-
-               if (r)
-                       return r;
+               ok = dpi_dsi_clk_calc(timings->pixel_clock * 1000, &ctx);
+               if (!ok)
+                       return -EINVAL;
 
-               fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
+               fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
        } else {
-               struct dss_clock_info dss_cinfo;
-               r = dss_calc_clock_div(timings->pixel_clock * 1000,
-                               &dss_cinfo, &dispc_cinfo);
+               ok = dpi_dss_clk_calc(timings->pixel_clock * 1000, &ctx);
+               if (!ok)
+                       return -EINVAL;
 
-               if (r)
-                       return r;
-
-               fck = dss_cinfo.fck;
+               fck = ctx.dss_cinfo.fck;
        }
 
-       lck_div = dispc_cinfo.lck_div;
-       pck_div = dispc_cinfo.pck_div;
+       lck_div = ctx.dispc_cinfo.lck_div;
+       pck_div = ctx.dispc_cinfo.pck_div;
 
        pck = fck / lck_div / pck_div / 1000;
 
@@ -401,6 +542,36 @@ static int __init dpi_verify_dsi_pll(struct platform_device *dsidev)
        return 0;
 }
 
+/*
+ * Return a hardcoded channel for the DPI output. This should work for
+ * current use cases, but it can later be expanded either to resolve the
+ * channel in some more dynamic manner or to take the channel as a user
+ * parameter.
+ */
+static enum omap_channel dpi_get_channel(void)
+{
+       switch (omapdss_get_version()) {
+       case OMAPDSS_VER_OMAP24xx:
+       case OMAPDSS_VER_OMAP34xx_ES1:
+       case OMAPDSS_VER_OMAP34xx_ES3:
+       case OMAPDSS_VER_OMAP3630:
+       case OMAPDSS_VER_AM35xx:
+               return OMAP_DSS_CHANNEL_LCD;
+
+       case OMAPDSS_VER_OMAP4430_ES1:
+       case OMAPDSS_VER_OMAP4430_ES2:
+       case OMAPDSS_VER_OMAP4:
+               return OMAP_DSS_CHANNEL_LCD2;
+
+       case OMAPDSS_VER_OMAP5:
+               return OMAP_DSS_CHANNEL_LCD3;
+
+       default:
+               DSSWARN("unsupported DSS version\n");
+               return OMAP_DSS_CHANNEL_LCD;
+       }
+}
+
 static int __init dpi_init_display(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev;
@@ -421,12 +592,7 @@ static int __init dpi_init_display(struct omap_dss_device *dssdev)
                dpi.vdds_dsi_reg = vdds_dsi;
        }
 
-       /*
-        * XXX We shouldn't need dssdev->channel for this. The dsi pll clock
-        * source for DPI is SoC integration detail, not something that should
-        * be configured in the dssdev
-        */
-       dsidev = dpi_get_dsidev(dssdev->channel);
+       dsidev = dpi_get_dsidev(dpi.output.dispc_channel);
 
        if (dsidev && dpi_verify_dsi_pll(dsidev)) {
                dsidev = NULL;
@@ -517,6 +683,8 @@ static void __init dpi_init_output(struct platform_device *pdev)
        out->pdev = pdev;
        out->id = OMAP_DSS_OUTPUT_DPI;
        out->type = OMAP_DISPLAY_TYPE_DPI;
+       out->name = "dpi.0";
+       out->dispc_channel = dpi_get_channel();
 
        dss_register_output(out);
 }
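
dpi_dss_clk_calc() above retries the divider search with a pixel clock window that widens cubically, 1000 * i^3 Hz per attempt, reaching about +/- 13.8 MHz on the 25th attempt. A standalone sketch of that retry loop, assuming a stub divider search against a fixed 96 MHz functional clock (both the stub and the clock value are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Stub for the real dss_div_calc(): accept only pck values that an integer
 * divider of a fixed 96 MHz functional clock can hit. */
static bool stub_div_calc(unsigned long pck_min, unsigned long pck_max)
{
	for (int d = 1; d <= 255; ++d) {
		unsigned long pck = 96000000UL / d;

		if (pck >= pck_min && pck <= pck_max)
			return true;
	}
	return false;
}

/*
 * Mirror of the widening retry in dpi_dss_clk_calc(): the acceptable pixel
 * clock window grows as 1000 * i^3 Hz per attempt.
 */
static bool dss_clk_calc(unsigned long pck)
{
	for (unsigned long i = 0; i < 25; ++i) {
		unsigned long spread = 1000UL * i * i * i;
		unsigned long pck_min = pck > spread ? pck - spread : 0;
		unsigned long pck_max = pck + spread;

		if (stub_div_calc(pck_min, pck_max)) {
			printf("found on attempt %lu (window +/- %lu Hz)\n",
			       i, spread);
			return true;
		}
	}
	return false;
}

int main(void)
{
	/* A 23.5 MHz request: no exact 96 MHz divider, so the window widens. */
	if (!dss_clk_calc(23500000UL))
		printf("no divider found\n");
	return 0;
}
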
index 28d41d16b7be45aa10d646ba8983cd57e418e363..9b1c5ecee11517389f17103eb5495321c1b7fc6f 100644 (file)
@@ -200,6 +200,11 @@ struct dsi_reg { u16 idx; };
 
 typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
 
+static int dsi_display_init_dispc(struct platform_device *dsidev,
+       struct omap_overlay_manager *mgr);
+static void dsi_display_uninit_dispc(struct platform_device *dsidev,
+       struct omap_overlay_manager *mgr);
+
 #define DSI_MAX_NR_ISRS                2
 #define DSI_MAX_NR_LANES       5
 
@@ -250,6 +255,24 @@ struct dsi_isr_tables {
        struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
 };
 
+struct dsi_clk_calc_ctx {
+       struct platform_device *dsidev;
+
+       /* inputs */
+
+       const struct omap_dss_dsi_config *config;
+
+       unsigned long req_pck_min, req_pck_nom, req_pck_max;
+
+       /* outputs */
+
+       struct dsi_clock_info dsi_cinfo;
+       struct dispc_clock_info dispc_cinfo;
+
+       struct omap_video_timings dispc_vm;
+       struct omap_dss_dsi_videomode_timings dsi_vm;
+};
+
 struct dsi_data {
        struct platform_device *pdev;
        void __iomem    *base;
@@ -261,6 +284,9 @@ struct dsi_data {
        struct clk *dss_clk;
        struct clk *sys_clk;
 
+       struct dispc_clock_info user_dispc_cinfo;
+       struct dsi_clock_info user_dsi_cinfo;
+
        struct dsi_clock_info current_cinfo;
 
        bool vdds_dsi_enabled;
@@ -324,6 +350,7 @@ struct dsi_data {
        unsigned long lpdiv_max;
 
        unsigned num_lanes_supported;
+       unsigned line_buffer_size;
 
        struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
        unsigned num_lanes_used;
@@ -1192,15 +1219,33 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
        return r;
 }
 
-static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
+static int dsi_lp_clock_calc(struct dsi_clock_info *cinfo,
+               unsigned long lp_clk_min, unsigned long lp_clk_max)
+{
+       unsigned long dsi_fclk = cinfo->dsi_pll_hsdiv_dsi_clk;
+       unsigned lp_clk_div;
+       unsigned long lp_clk;
+
+       lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2);
+       lp_clk = dsi_fclk / 2 / lp_clk_div;
+
+       if (lp_clk < lp_clk_min || lp_clk > lp_clk_max)
+               return -EINVAL;
+
+       cinfo->lp_clk_div = lp_clk_div;
+       cinfo->lp_clk = lp_clk;
+
+       return 0;
+}
+
+static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        unsigned long dsi_fclk;
        unsigned lp_clk_div;
        unsigned long lp_clk;
 
-       lp_clk_div = dssdev->clocks.dsi.lp_clk_div;
+       lp_clk_div = dsi->user_dsi_cinfo.lp_clk_div;
 
        if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
                return -EINVAL;
@@ -1272,6 +1317,75 @@ static int dsi_pll_power(struct platform_device *dsidev,
        return 0;
 }
 
+unsigned long dsi_get_pll_clkin(struct platform_device *dsidev)
+{
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       return clk_get_rate(dsi->sys_clk);
+}
+
+bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
+               unsigned long out_min, dsi_hsdiv_calc_func func, void *data)
+{
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       int regm, regm_start, regm_stop;
+       unsigned long out_max;
+       unsigned long out;
+
+       out_min = out_min ? out_min : 1;
+       out_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+
+       regm_start = max(DIV_ROUND_UP(pll, out_max), 1ul);
+       regm_stop = min(pll / out_min, dsi->regm_dispc_max);
+
+       for (regm = regm_start; regm <= regm_stop; ++regm) {
+               out = pll / regm;
+
+               if (func(regm, out, data))
+                       return true;
+       }
+
+       return false;
+}
+
+bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
+               unsigned long pll_min, unsigned long pll_max,
+               dsi_pll_calc_func func, void *data)
+{
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       int regn, regn_start, regn_stop;
+       int regm, regm_start, regm_stop;
+       unsigned long fint, pll;
+       const unsigned long pll_hw_max = 1800000000;
+       unsigned long fint_hw_min, fint_hw_max;
+
+       fint_hw_min = dsi->fint_min;
+       fint_hw_max = dsi->fint_max;
+
+       regn_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
+       regn_stop = min(clkin / fint_hw_min, dsi->regn_max);
+
+       pll_max = pll_max ? pll_max : ULONG_MAX;
+
+       for (regn = regn_start; regn <= regn_stop; ++regn) {
+               fint = clkin / regn;
+
+               regm_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
+                               1ul);
+               regm_stop = min3(pll_max / fint / 2,
+                               pll_hw_max / fint / 2,
+                               dsi->regm_max);
+
+               for (regm = regm_start; regm <= regm_stop; ++regm) {
+                       pll = 2 * regm * fint;
+
+                       if (func(regn, regm, fint, pll, data))
+                               return true;
+               }
+       }
+
+       return false;
+}
+
 /* calculate clock rates using dividers in cinfo */
 static int dsi_calc_clock_rates(struct platform_device *dsidev,
                struct dsi_clock_info *cinfo)
@@ -1316,192 +1430,7 @@ static int dsi_calc_clock_rates(struct platform_device *dsidev,
        return 0;
 }
 
-int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
-               unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
-               struct dispc_clock_info *dispc_cinfo)
-{
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct dsi_clock_info cur, best;
-       struct dispc_clock_info best_dispc;
-       int min_fck_per_pck;
-       int match = 0;
-       unsigned long dss_sys_clk, max_dss_fck;
-
-       dss_sys_clk = clk_get_rate(dsi->sys_clk);
-
-       max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
-
-       if (req_pck == dsi->cache_req_pck &&
-                       dsi->cache_cinfo.clkin == dss_sys_clk) {
-               DSSDBG("DSI clock info found from cache\n");
-               *dsi_cinfo = dsi->cache_cinfo;
-               dispc_find_clk_divs(req_pck, dsi_cinfo->dsi_pll_hsdiv_dispc_clk,
-                       dispc_cinfo);
-               return 0;
-       }
-
-       min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
-
-       if (min_fck_per_pck &&
-               req_pck * min_fck_per_pck > max_dss_fck) {
-               DSSERR("Requested pixel clock not possible with the current "
-                               "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
-                               "the constraint off.\n");
-               min_fck_per_pck = 0;
-       }
-
-       DSSDBG("dsi_pll_calc\n");
-
-retry:
-       memset(&best, 0, sizeof(best));
-       memset(&best_dispc, 0, sizeof(best_dispc));
-
-       memset(&cur, 0, sizeof(cur));
-       cur.clkin = dss_sys_clk;
-
-       /* 0.75MHz < Fint = clkin / regn < 2.1MHz */
-       /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
-       for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
-               cur.fint = cur.clkin / cur.regn;
-
-               if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
-                       continue;
-
-               /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
-               for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
-                       unsigned long a, b;
-
-                       a = 2 * cur.regm * (cur.clkin/1000);
-                       b = cur.regn;
-                       cur.clkin4ddr = a / b * 1000;
-
-                       if (cur.clkin4ddr > 1800 * 1000 * 1000)
-                               break;
-
-                       /* dsi_pll_hsdiv_dispc_clk(MHz) =
-                        * DSIPHY(MHz) / regm_dispc  < 173MHz/186Mhz */
-                       for (cur.regm_dispc = 1; cur.regm_dispc <
-                                       dsi->regm_dispc_max; ++cur.regm_dispc) {
-                               struct dispc_clock_info cur_dispc;
-                               cur.dsi_pll_hsdiv_dispc_clk =
-                                       cur.clkin4ddr / cur.regm_dispc;
-
-                               if (cur.regm_dispc > 1 &&
-                                               cur.regm_dispc % 2 != 0 &&
-                                               req_pck >= 1000000)
-                                       continue;
-
-                               /* this will narrow down the search a bit,
-                                * but still give pixclocks below what was
-                                * requested */
-                               if (cur.dsi_pll_hsdiv_dispc_clk  < req_pck)
-                                       break;
-
-                               if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
-                                       continue;
-
-                               if (min_fck_per_pck &&
-                                       cur.dsi_pll_hsdiv_dispc_clk <
-                                               req_pck * min_fck_per_pck)
-                                       continue;
-
-                               match = 1;
-
-                               dispc_find_clk_divs(req_pck,
-                                               cur.dsi_pll_hsdiv_dispc_clk,
-                                               &cur_dispc);
-
-                               if (abs(cur_dispc.pck - req_pck) <
-                                               abs(best_dispc.pck - req_pck)) {
-                                       best = cur;
-                                       best_dispc = cur_dispc;
-
-                                       if (cur_dispc.pck == req_pck)
-                                               goto found;
-                               }
-                       }
-               }
-       }
-found:
-       if (!match) {
-               if (min_fck_per_pck) {
-                       DSSERR("Could not find suitable clock settings.\n"
-                                       "Turning FCK/PCK constraint off and"
-                                       "trying again.\n");
-                       min_fck_per_pck = 0;
-                       goto retry;
-               }
-
-               DSSERR("Could not find suitable clock settings.\n");
-
-               return -EINVAL;
-       }
-
-       /* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
-       best.regm_dsi = 0;
-       best.dsi_pll_hsdiv_dsi_clk = 0;
-
-       if (dsi_cinfo)
-               *dsi_cinfo = best;
-       if (dispc_cinfo)
-               *dispc_cinfo = best_dispc;
-
-       dsi->cache_req_pck = req_pck;
-       dsi->cache_clk_freq = 0;
-       dsi->cache_cinfo = best;
-
-       return 0;
-}
-
-static int dsi_pll_calc_ddrfreq(struct platform_device *dsidev,
-               unsigned long req_clkin4ddr, struct dsi_clock_info *cinfo)
-{
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct dsi_clock_info cur, best;
-
-       DSSDBG("dsi_pll_calc_ddrfreq\n");
-
-       memset(&best, 0, sizeof(best));
-       memset(&cur, 0, sizeof(cur));
-
-       cur.clkin = clk_get_rate(dsi->sys_clk);
-
-       for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
-               cur.fint = cur.clkin / cur.regn;
-
-               if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
-                       continue;
-
-               /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
-               for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
-                       unsigned long a, b;
-
-                       a = 2 * cur.regm * (cur.clkin/1000);
-                       b = cur.regn;
-                       cur.clkin4ddr = a / b * 1000;
-
-                       if (cur.clkin4ddr > 1800 * 1000 * 1000)
-                               break;
-
-                       if (abs(cur.clkin4ddr - req_clkin4ddr) <
-                                       abs(best.clkin4ddr - req_clkin4ddr)) {
-                               best = cur;
-                               DSSDBG("best %ld\n", best.clkin4ddr);
-                       }
-
-                       if (cur.clkin4ddr == req_clkin4ddr)
-                               goto found;
-               }
-       }
-found:
-       if (cinfo)
-               *cinfo = best;
-
-       return 0;
-}
-
-static void dsi_pll_calc_dsi_fck(struct platform_device *dsidev,
-               struct dsi_clock_info *cinfo)
+static void dsi_pll_calc_dsi_fck(struct dsi_clock_info *cinfo)
 {
        unsigned long max_dsi_fck;
 
@@ -1511,90 +1440,6 @@ static void dsi_pll_calc_dsi_fck(struct platform_device *dsidev,
        cinfo->dsi_pll_hsdiv_dsi_clk = cinfo->clkin4ddr / cinfo->regm_dsi;
 }
 
-static int dsi_pll_calc_dispc_fck(struct platform_device *dsidev,
-               unsigned long req_pck, struct dsi_clock_info *cinfo,
-               struct dispc_clock_info *dispc_cinfo)
-{
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       unsigned regm_dispc, best_regm_dispc;
-       unsigned long dispc_clk, best_dispc_clk;
-       int min_fck_per_pck;
-       unsigned long max_dss_fck;
-       struct dispc_clock_info best_dispc;
-       bool match;
-
-       max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
-
-       min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
-
-       if (min_fck_per_pck &&
-                       req_pck * min_fck_per_pck > max_dss_fck) {
-               DSSERR("Requested pixel clock not possible with the current "
-                               "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
-                               "the constraint off.\n");
-               min_fck_per_pck = 0;
-       }
-
-retry:
-       best_regm_dispc = 0;
-       best_dispc_clk = 0;
-       memset(&best_dispc, 0, sizeof(best_dispc));
-       match = false;
-
-       for (regm_dispc = 1; regm_dispc < dsi->regm_dispc_max; ++regm_dispc) {
-               struct dispc_clock_info cur_dispc;
-
-               dispc_clk = cinfo->clkin4ddr / regm_dispc;
-
-               /* this will narrow down the search a bit,
-                * but still give pixclocks below what was
-                * requested */
-               if (dispc_clk  < req_pck)
-                       break;
-
-               if (dispc_clk > max_dss_fck)
-                       continue;
-
-               if (min_fck_per_pck && dispc_clk < req_pck * min_fck_per_pck)
-                       continue;
-
-               match = true;
-
-               dispc_find_clk_divs(req_pck, dispc_clk, &cur_dispc);
-
-               if (abs(cur_dispc.pck - req_pck) <
-                               abs(best_dispc.pck - req_pck)) {
-                       best_regm_dispc = regm_dispc;
-                       best_dispc_clk = dispc_clk;
-                       best_dispc = cur_dispc;
-
-                       if (cur_dispc.pck == req_pck)
-                               goto found;
-               }
-       }
-
-       if (!match) {
-               if (min_fck_per_pck) {
-                       DSSERR("Could not find suitable clock settings.\n"
-                                       "Turning FCK/PCK constraint off and"
-                                       "trying again.\n");
-                       min_fck_per_pck = 0;
-                       goto retry;
-               }
-
-               DSSERR("Could not find suitable clock settings.\n");
-
-               return -EINVAL;
-       }
-found:
-       cinfo->regm_dispc = best_regm_dispc;
-       cinfo->dsi_pll_hsdiv_dispc_clk = best_dispc_clk;
-
-       *dispc_cinfo = best_dispc;
-
-       return 0;
-}
-
 int dsi_pll_set_clock_div(struct platform_device *dsidev,
                struct dsi_clock_info *cinfo)
 {
@@ -2783,6 +2628,7 @@ static int dsi_vc_enable(struct platform_device *dsidev, int channel,
 
 static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
 {
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        u32 r;
 
        DSSDBG("Initial config of virtual channel %d", channel);
@@ -2807,6 +2653,8 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
        r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
 
        dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
+
+       dsi->vc[channel].source = DSI_VC_SOURCE_L4;
 }
 
 static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
@@ -3777,13 +3625,12 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
 
        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
                int bpp = dsi_get_pixel_size(dsi->pix_fmt);
-               unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
                struct omap_video_timings *timings = &dsi->timings;
                /*
                 * Don't use line buffers if width is greater than the video
                 * port's line buffer size
                 */
-               if (line_buf_size <= timings->x_res * bpp / 8)
+               if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
                        num_line_buffers = 0;
                else
                        num_line_buffers = 2;
@@ -3799,18 +3646,22 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
 static void dsi_config_vp_sync_events(struct platform_device *dsidev)
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       bool vsync_end = dsi->vm_timings.vp_vsync_end;
-       bool hsync_end = dsi->vm_timings.vp_hsync_end;
+       bool sync_end;
        u32 r;
 
+       if (dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE)
+               sync_end = true;
+       else
+               sync_end = false;
+
        r = dsi_read_reg(dsidev, DSI_CTRL);
        r = FLD_MOD(r, 1, 9, 9);                /* VP_DE_POL */
        r = FLD_MOD(r, 1, 10, 10);              /* VP_HSYNC_POL */
        r = FLD_MOD(r, 1, 11, 11);              /* VP_VSYNC_POL */
        r = FLD_MOD(r, 1, 15, 15);              /* VP_VSYNC_START */
-       r = FLD_MOD(r, vsync_end, 16, 16);      /* VP_VSYNC_END */
+       r = FLD_MOD(r, sync_end, 16, 16);       /* VP_VSYNC_END */
        r = FLD_MOD(r, 1, 17, 17);              /* VP_HSYNC_START */
-       r = FLD_MOD(r, hsync_end, 18, 18);      /* VP_HSYNC_END */
+       r = FLD_MOD(r, sync_end, 18, 18);       /* VP_HSYNC_END */
        dsi_write_reg(dsidev, DSI_CTRL, r);
 }
 
@@ -3897,9 +3748,8 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
        return max(lp_inter, 0);
 }
 
-static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
+static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        int blanking_mode;
        int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
@@ -3910,7 +3760,7 @@ static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
        struct omap_video_timings *timings = &dsi->timings;
        int bpp = dsi_get_pixel_size(dsi->pix_fmt);
        int ndl = dsi->num_lanes_used - 1;
-       int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
+       int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.regm_dsi + 1;
        int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
        int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
        int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
@@ -4015,9 +3865,8 @@ static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
        dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
 }
 
-static int dsi_proto_config(struct omap_dss_device *dssdev)
+static int dsi_proto_config(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        u32 r;
        int buswidth = 0;
@@ -4075,7 +3924,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
                dsi_config_vp_sync_events(dsidev);
                dsi_config_blanking_modes(dsidev);
-               dsi_config_cmd_mode_interleaving(dssdev);
+               dsi_config_cmd_mode_interleaving(dsidev);
        }
 
        dsi_vc_initial_config(dsidev, 0);
@@ -4159,11 +4008,12 @@ static void dsi_proto_timings(struct platform_device *dsidev)
                int vfp = dsi->vm_timings.vfp;
                int vbp = dsi->vm_timings.vbp;
                int window_sync = dsi->vm_timings.window_sync;
-               bool hsync_end = dsi->vm_timings.vp_hsync_end;
+               bool hsync_end;
                struct omap_video_timings *timings = &dsi->timings;
                int bpp = dsi_get_pixel_size(dsi->pix_fmt);
                int tl, t_he, width_bytes;
 
+               hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
                t_he = hsync_end ?
                        ((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
 
@@ -4266,82 +4116,26 @@ int omapdss_dsi_configure_pins(struct omap_dss_device *dssdev,
 }
 EXPORT_SYMBOL(omapdss_dsi_configure_pins);
 
-int omapdss_dsi_set_clocks(struct omap_dss_device *dssdev,
-               unsigned long ddr_clk, unsigned long lp_clk)
-{
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct dsi_clock_info cinfo;
-       struct dispc_clock_info dispc_cinfo;
-       unsigned lp_clk_div;
-       unsigned long dsi_fclk;
-       int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
-       unsigned long pck;
-       int r;
-
-       DSSDBG("Setting DSI clocks: ddr_clk %lu, lp_clk %lu", ddr_clk, lp_clk);
-
-       mutex_lock(&dsi->lock);
-
-       /* Calculate PLL output clock */
-       r = dsi_pll_calc_ddrfreq(dsidev, ddr_clk * 4, &cinfo);
-       if (r)
-               goto err;
-
-       /* Calculate PLL's DSI clock */
-       dsi_pll_calc_dsi_fck(dsidev, &cinfo);
-
-       /* Calculate PLL's DISPC clock and pck & lck divs */
-       pck = cinfo.clkin4ddr / 16 * (dsi->num_lanes_used - 1) * 8 / bpp;
-       DSSDBG("finding dispc dividers for pck %lu\n", pck);
-       r = dsi_pll_calc_dispc_fck(dsidev, pck, &cinfo, &dispc_cinfo);
-       if (r)
-               goto err;
-
-       /* Calculate LP clock */
-       dsi_fclk = cinfo.dsi_pll_hsdiv_dsi_clk;
-       lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk * 2);
-
-       dssdev->clocks.dsi.regn = cinfo.regn;
-       dssdev->clocks.dsi.regm = cinfo.regm;
-       dssdev->clocks.dsi.regm_dispc = cinfo.regm_dispc;
-       dssdev->clocks.dsi.regm_dsi = cinfo.regm_dsi;
-
-       dssdev->clocks.dsi.lp_clk_div = lp_clk_div;
-
-       dssdev->clocks.dispc.channel.lck_div = dispc_cinfo.lck_div;
-       dssdev->clocks.dispc.channel.pck_div = dispc_cinfo.pck_div;
-
-       dssdev->clocks.dispc.dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK;
-
-       dssdev->clocks.dispc.channel.lcd_clk_src =
-               dsi->module_id == 0 ?
-               OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
-               OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
-
-       dssdev->clocks.dsi.dsi_fclk_src =
-               dsi->module_id == 0 ?
-               OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
-               OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI;
-
-       mutex_unlock(&dsi->lock);
-       return 0;
-err:
-       mutex_unlock(&dsi->lock);
-       return r;
-}
-EXPORT_SYMBOL(omapdss_dsi_set_clocks);
-
 int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
+       struct omap_overlay_manager *mgr = dsi->output.manager;
        int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+       struct omap_dss_output *out = &dsi->output;
        u8 data_type;
        u16 word_count;
        int r;
 
+       if (out == NULL || out->manager == NULL) {
+               DSSERR("failed to enable display: no output/manager\n");
+               return -ENODEV;
+       }
+
+       r = dsi_display_init_dispc(dsidev, mgr);
+       if (r)
+               goto err_init_dispc;
+
        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
                switch (dsi->pix_fmt) {
                case OMAP_DSS_DSI_FMT_RGB888:
@@ -4357,8 +4151,8 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
                        data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
                        break;
                default:
-                       BUG();
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto err_pix_fmt;
                };
 
                dsi_if_enable(dsidev, false);
@@ -4377,16 +4171,20 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
        }
 
        r = dss_mgr_enable(mgr);
-       if (r) {
-               if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
-                       dsi_if_enable(dsidev, false);
-                       dsi_vc_enable(dsidev, channel, false);
-               }
-
-               return r;
-       }
+       if (r)
+               goto err_mgr_enable;
 
        return 0;
+
+err_mgr_enable:
+       if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
+               dsi_if_enable(dsidev, false);
+               dsi_vc_enable(dsidev, channel, false);
+       }
+err_pix_fmt:
+       dsi_display_uninit_dispc(dsidev, mgr);
+err_init_dispc:
+       return r;
 }
 EXPORT_SYMBOL(dsi_enable_video_output);
 
@@ -4394,7 +4192,7 @@ void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
+       struct omap_overlay_manager *mgr = dsi->output.manager;
 
        if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
                dsi_if_enable(dsidev, false);
@@ -4408,14 +4206,15 @@ void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
        }
 
        dss_mgr_disable(mgr);
+
+       dsi_display_uninit_dispc(dsidev, mgr);
 }
 EXPORT_SYMBOL(dsi_disable_video_output);
 
-static void dsi_update_screen_dispc(struct omap_dss_device *dssdev)
+static void dsi_update_screen_dispc(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
+       struct omap_overlay_manager *mgr = dsi->output.manager;
        unsigned bytespp;
        unsigned bytespl;
        unsigned bytespf;
@@ -4425,7 +4224,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev)
        u32 l;
        int r;
        const unsigned channel = dsi->update_channel;
-       const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
+       const unsigned line_buf_size = dsi->line_buffer_size;
        u16 w = dsi->timings.x_res;
        u16 h = dsi->timings.y_res;
 
@@ -4571,7 +4370,7 @@ int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
        dsi->update_bytes = dw * dh *
                dsi_get_pixel_size(dsi->pix_fmt) / 8;
 #endif
-       dsi_update_screen_dispc(dssdev);
+       dsi_update_screen_dispc(dsidev);
 
        return 0;
 }
@@ -4579,18 +4378,17 @@ EXPORT_SYMBOL(omap_dsi_update);
 
 /* Display funcs */
 
-static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
+static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct dispc_clock_info dispc_cinfo;
        int r;
-       unsigned long long fck;
+       unsigned long fck;
 
        fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
 
-       dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
-       dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
+       dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
+       dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;
 
        r = dispc_calc_clock_rates(fck, &dispc_cinfo);
        if (r) {
@@ -4603,21 +4401,17 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
+static int dsi_display_init_dispc(struct platform_device *dsidev,
+               struct omap_overlay_manager *mgr)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
        int r;
 
-       if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
-               dsi->timings.hsw = 1;
-               dsi->timings.hfp = 1;
-               dsi->timings.hbp = 1;
-               dsi->timings.vsw = 1;
-               dsi->timings.vfp = 0;
-               dsi->timings.vbp = 0;
+       dss_select_lcd_clk_source(mgr->id, dsi->module_id == 0 ?
+                       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
+                       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);
 
+       if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
                r = dss_mgr_register_framedone_handler(mgr,
                                dsi_framedone_irq_callback, dsidev);
                if (r) {
@@ -4645,7 +4439,7 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
 
        dss_mgr_set_timings(mgr, &dsi->timings);
 
-       r = dsi_configure_dispc_clocks(dssdev);
+       r = dsi_configure_dispc_clocks(dsidev);
        if (r)
                goto err1;
 
@@ -4662,30 +4456,30 @@ err1:
                dss_mgr_unregister_framedone_handler(mgr,
                                dsi_framedone_irq_callback, dsidev);
 err:
+       dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
        return r;
 }
 
-static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
+static void dsi_display_uninit_dispc(struct platform_device *dsidev,
+               struct omap_overlay_manager *mgr)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
 
        if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
                dss_mgr_unregister_framedone_handler(mgr,
                                dsi_framedone_irq_callback, dsidev);
+
+       dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
 }
 
-static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
+static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct dsi_clock_info cinfo;
        int r;
 
-       cinfo.regn  = dssdev->clocks.dsi.regn;
-       cinfo.regm  = dssdev->clocks.dsi.regm;
-       cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
-       cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
+       cinfo = dsi->user_dsi_cinfo;
+
        r = dsi_calc_clock_rates(dsidev, &cinfo);
        if (r) {
                DSSERR("Failed to calc dsi clocks\n");
@@ -4701,24 +4495,22 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
        return 0;
 }
 
-static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
+static int dsi_display_init_dsi(struct platform_device *dsidev)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
        int r;
 
        r = dsi_pll_init(dsidev, true, true);
        if (r)
                goto err0;
 
-       r = dsi_configure_dsi_clocks(dssdev);
+       r = dsi_configure_dsi_clocks(dsidev);
        if (r)
                goto err1;
 
-       dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
-       dss_select_lcd_clk_source(mgr->id,
-                       dssdev->clocks.dispc.channel.lcd_clk_src);
+       dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
+                       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
+                       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI);
 
        DSSDBG("PLL OK\n");
 
@@ -4729,12 +4521,12 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
        _dsi_print_reset_status(dsidev);
 
        dsi_proto_timings(dsidev);
-       dsi_set_lp_clk_divisor(dssdev);
+       dsi_set_lp_clk_divisor(dsidev);
 
        if (1)
                _dsi_print_reset_status(dsidev);
 
-       r = dsi_proto_config(dssdev);
+       r = dsi_proto_config(dsidev);
        if (r)
                goto err3;
 
@@ -4751,20 +4543,16 @@ err3:
        dsi_cio_uninit(dsidev);
 err2:
        dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
-       dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
-
 err1:
        dsi_pll_uninit(dsidev, true);
 err0:
        return r;
 }
 
-static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
+static void dsi_display_uninit_dsi(struct platform_device *dsidev,
                bool disconnect_lanes, bool enter_ulps)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_overlay_manager *mgr = dssdev->output->manager;
 
        if (enter_ulps && !dsi->ulps_enabled)
                dsi_enter_ulps(dsidev);
@@ -4777,7 +4565,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
        dsi_vc_enable(dsidev, 3, 0);
 
        dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
-       dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
        dsi_cio_uninit(dsidev);
        dsi_pll_uninit(dsidev, disconnect_lanes);
 }
@@ -4786,7 +4573,6 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-       struct omap_dss_output *out = dssdev->output;
        int r = 0;
 
        DSSDBG("dsi_display_enable\n");
@@ -4795,12 +4581,6 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
 
        mutex_lock(&dsi->lock);
 
-       if (out == NULL || out->manager == NULL) {
-               DSSERR("failed to enable display: no output/manager\n");
-               r = -ENODEV;
-               goto err_start_dev;
-       }
-
        r = omap_dss_start_device(dssdev);
        if (r) {
                DSSERR("failed to start device\n");
@@ -4815,11 +4595,7 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
 
        _dsi_initialize_irq(dsidev);
 
-       r = dsi_display_init_dispc(dssdev);
-       if (r)
-               goto err_init_dispc;
-
-       r = dsi_display_init_dsi(dssdev);
+       r = dsi_display_init_dsi(dsidev);
        if (r)
                goto err_init_dsi;
 
@@ -4828,8 +4604,6 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
        return 0;
 
 err_init_dsi:
-       dsi_display_uninit_dispc(dssdev);
-err_init_dispc:
        dsi_enable_pll_clock(dsidev, 0);
        dsi_runtime_put(dsidev);
 err_get_dsi:
@@ -4858,9 +4632,7 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
        dsi_sync_vc(dsidev, 2);
        dsi_sync_vc(dsidev, 3);
 
-       dsi_display_uninit_dispc(dssdev);
-
-       dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
+       dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);
 
        dsi_runtime_put(dsidev);
        dsi_enable_pll_clock(dsidev, 0);
@@ -4881,75 +4653,577 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
 }
 EXPORT_SYMBOL(omapdss_dsi_enable_te);
 
-void omapdss_dsi_set_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings)
+#ifdef PRINT_VERBOSE_VM_TIMINGS
+static void print_dsi_vm(const char *str,
+               const struct omap_dss_dsi_videomode_timings *t)
+{
+       unsigned long byteclk = t->hsclk / 4;
+       int bl, wc, pps, tot;
+
+       wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
+       pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
+       bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
+       tot = bl + pps;
+
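+/* TO_DSI_T(): convert a byte clock cycle count to nanoseconds */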
+#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
+
+       pr_debug("%s bck %lu, %u/%u/%u/%u/%u/%u = %u+%u = %u, "
+                       "%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
+                       str,
+                       byteclk,
+                       t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
+                       bl, pps, tot,
+                       TO_DSI_T(t->hss),
+                       TO_DSI_T(t->hsa),
+                       TO_DSI_T(t->hse),
+                       TO_DSI_T(t->hbp),
+                       TO_DSI_T(pps),
+                       TO_DSI_T(t->hfp),
+
+                       TO_DSI_T(bl),
+                       TO_DSI_T(pps),
+
+                       TO_DSI_T(tot));
+#undef TO_DSI_T
+}
+
+static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
+{
+       unsigned long pck = t->pixel_clock * 1000;
+       int hact, bl, tot;
+
+       hact = t->x_res;
+       bl = t->hsw + t->hbp + t->hfp;
+       tot = hact + bl;
+
+#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
+
+       pr_debug("%s pck %lu, %u/%u/%u/%u = %u+%u = %u, "
+                       "%u/%u/%u/%u = %u + %u = %u\n",
+                       str,
+                       pck,
+                       t->hsw, t->hbp, hact, t->hfp,
+                       bl, hact, tot,
+                       TO_DISPC_T(t->hsw),
+                       TO_DISPC_T(t->hbp),
+                       TO_DISPC_T(hact),
+                       TO_DISPC_T(t->hfp),
+                       TO_DISPC_T(bl),
+                       TO_DISPC_T(hact),
+                       TO_DISPC_T(tot));
+#undef TO_DISPC_T
+}
+
+/* note: this is not quite accurate */
+static void print_dsi_dispc_vm(const char *str,
+               const struct omap_dss_dsi_videomode_timings *t)
+{
+       struct omap_video_timings vm = { 0 };
+       unsigned long byteclk = t->hsclk / 4;
+       unsigned long pck;
+       u64 dsi_tput;
+       int dsi_hact, dsi_htot;
+
+       dsi_tput = (u64)byteclk * t->ndl * 8;
+       pck = (u32)div64_u64(dsi_tput, t->bitspp);
+       dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
+       dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
+
+       vm.pixel_clock = pck / 1000;
+       vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
+       vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
+       vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
+       vm.x_res = t->hact;
+
+       print_dispc_vm(str, &vm);
+}
+#endif /* PRINT_VERBOSE_VM_TIMINGS */
+
+static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+               unsigned long pck, void *data)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       struct dsi_clk_calc_ctx *ctx = data;
+       struct omap_video_timings *t = &ctx->dispc_vm;
 
-       mutex_lock(&dsi->lock);
+       ctx->dispc_cinfo.lck_div = lckd;
+       ctx->dispc_cinfo.pck_div = pckd;
+       ctx->dispc_cinfo.lck = lck;
+       ctx->dispc_cinfo.pck = pck;
 
-       dsi->timings = *timings;
+       *t = *ctx->config->timings;
+       t->pixel_clock = pck / 1000;
+       t->x_res = ctx->config->timings->x_res;
+       t->y_res = ctx->config->timings->y_res;
+       t->hsw = t->hfp = t->hbp = t->vsw = 1;
+       t->vfp = t->vbp = 0;
 
-       mutex_unlock(&dsi->lock);
+       return true;
 }
-EXPORT_SYMBOL(omapdss_dsi_set_timings);
 
-void omapdss_dsi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
+static bool dsi_cm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+               void *data)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       struct dsi_clk_calc_ctx *ctx = data;
 
-       mutex_lock(&dsi->lock);
+       ctx->dsi_cinfo.regm_dispc = regm_dispc;
+       ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
 
-       dsi->timings.x_res = w;
-       dsi->timings.y_res = h;
+       return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
+                       dsi_cm_calc_dispc_cb, ctx);
+}
 
-       mutex_unlock(&dsi->lock);
+static bool dsi_cm_calc_pll_cb(int regn, int regm, unsigned long fint,
+               unsigned long pll, void *data)
+{
+       struct dsi_clk_calc_ctx *ctx = data;
+
+       ctx->dsi_cinfo.regn = regn;
+       ctx->dsi_cinfo.regm = regm;
+       ctx->dsi_cinfo.fint = fint;
+       ctx->dsi_cinfo.clkin4ddr = pll;
+
+       return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min,
+                       dsi_cm_calc_hsdiv_cb, ctx);
 }
-EXPORT_SYMBOL(omapdss_dsi_set_size);
 
-void omapdss_dsi_set_pixel_format(struct omap_dss_device *dssdev,
-               enum omap_dss_dsi_pixel_format fmt)
+static bool dsi_cm_calc(struct dsi_data *dsi,
+               const struct omap_dss_dsi_config *cfg,
+               struct dsi_clk_calc_ctx *ctx)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       unsigned long clkin;
+       int bitspp, ndl;
+       unsigned long pll_min, pll_max;
+       unsigned long pck, txbyteclk;
 
-       mutex_lock(&dsi->lock);
+       clkin = clk_get_rate(dsi->sys_clk);
+       bitspp = dsi_get_pixel_size(cfg->pixel_format);
+       ndl = dsi->num_lanes_used - 1;
+
+       /*
+        * Here we should calculate minimum txbyteclk to be able to send the
+        * frame in time, and also to handle TE. That's not very simple, though,
+        * especially as we go to LP between each pixel packet due to HW
+        * "feature". So let's just estimate very roughly and multiply by 1.5.
+        */
+       pck = cfg->timings->pixel_clock * 1000;
+       pck = pck * 3 / 2;
+       txbyteclk = pck * bitspp / 8 / ndl;
 
-       dsi->pix_fmt = fmt;
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->dsidev = dsi->pdev;
+       ctx->config = cfg;
+       ctx->req_pck_min = pck;
+       ctx->req_pck_nom = pck;
+       ctx->req_pck_max = pck * 3 / 2;
+       ctx->dsi_cinfo.clkin = clkin;
 
-       mutex_unlock(&dsi->lock);
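+       /*
+        * The PLL output (clkin4ddr) runs at 4x the HS clock and 16x the
+        * TX byte clock, so scale both limits accordingly.
+        */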
+       pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
+       pll_max = cfg->hs_clk_max * 4;
+
+       return dsi_pll_calc(dsi->pdev, clkin,
+                       pll_min, pll_max,
+                       dsi_cm_calc_pll_cb, ctx);
 }
-EXPORT_SYMBOL(omapdss_dsi_set_pixel_format);
 
-void omapdss_dsi_set_operation_mode(struct omap_dss_device *dssdev,
-               enum omap_dss_dsi_mode mode)
+static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
 {
-       struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
+       const struct omap_dss_dsi_config *cfg = ctx->config;
+       int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+       int ndl = dsi->num_lanes_used - 1;
+       unsigned long hsclk = ctx->dsi_cinfo.clkin4ddr / 4;
+       unsigned long byteclk = hsclk / 4;
 
-       mutex_lock(&dsi->lock);
+       unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max;
+       int xres;
+       int panel_htot, panel_hbl; /* pixels */
+       int dispc_htot, dispc_hbl; /* pixels */
+       int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
+       int hfp, hsa, hbp;
+       const struct omap_video_timings *req_vm;
+       struct omap_video_timings *dispc_vm;
+       struct omap_dss_dsi_videomode_timings *dsi_vm;
+       u64 dsi_tput, dispc_tput;
 
-       dsi->mode = mode;
+       dsi_tput = (u64)byteclk * ndl * 8;
 
-       mutex_unlock(&dsi->lock);
+       req_vm = cfg->timings;
+       req_pck_min = ctx->req_pck_min;
+       req_pck_max = ctx->req_pck_max;
+       req_pck_nom = ctx->req_pck_nom;
+
+       dispc_pck = ctx->dispc_cinfo.pck;
+       dispc_tput = (u64)dispc_pck * bitspp;
+
+       xres = req_vm->x_res;
+
+       panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw;
+       panel_htot = xres + panel_hbl;
+
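+       /* DSI active width in byteclks: payload plus 6 bytes of packet
+        * overhead (header and checksum), split across the lanes */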
+       dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
+
+       /*
+        * When there are no line buffers, DISPC and DSI must have the
+        * same tput. Otherwise DISPC tput needs to be higher than DSI's.
+        */
+       if (dsi->line_buffer_size < xres * bitspp / 8) {
+               if (dispc_tput != dsi_tput)
+                       return false;
+       } else {
+               if (dispc_tput < dsi_tput)
+                       return false;
+       }
+
+       /* DSI tput must be over the min requirement */
+       if (dsi_tput < (u64)bitspp * req_pck_min)
+               return false;
+
+       /* In non-burst mode, DSI tput must be below the max requirement. */
+       if (cfg->trans_mode != OMAP_DSS_DSI_BURST_MODE) {
+               if (dsi_tput > (u64)bitspp * req_pck_max)
+                       return false;
+       }
+
+       hss = DIV_ROUND_UP(4, ndl);
+
+       if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
+               if (ndl == 3 && req_vm->hsw == 0)
+                       hse = 1;
+               else
+                       hse = DIV_ROUND_UP(4, ndl);
+       } else {
+               hse = 0;
+       }
+
+       /* DSI htot to match the panel's nominal pck */
+       dsi_htot = div64_u64((u64)panel_htot * byteclk, req_pck_nom);
+
+       /* fail if there would be no time for blanking */
+       if (dsi_htot < hss + hse + dsi_hact)
+               return false;
+
+       /* total DSI blanking needed to achieve panel's TL */
+       dsi_hbl = dsi_htot - dsi_hact;
+
+       /* DISPC htot to match the DSI TL */
+       dispc_htot = div64_u64((u64)dsi_htot * dispc_pck, byteclk);
+
+       /* verify that the DSI and DISPC TLs are the same */
+       if ((u64)dsi_htot * dispc_pck != (u64)dispc_htot * byteclk)
+               return false;
+
+       dispc_hbl = dispc_htot - xres;
+
+       /* setup DSI videomode */
+
+       dsi_vm = &ctx->dsi_vm;
+       memset(dsi_vm, 0, sizeof(*dsi_vm));
+
+       dsi_vm->hsclk = hsclk;
+
+       dsi_vm->ndl = ndl;
+       dsi_vm->bitspp = bitspp;
+
+       if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
+               hsa = 0;
+       } else if (ndl == 3 && req_vm->hsw == 0) {
+               hsa = 0;
+       } else {
+               hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom);
+               hsa = max(hsa - hse, 1);
+       }
+
+       hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom);
+       hbp = max(hbp, 1);
+
+       hfp = dsi_hbl - (hss + hsa + hse + hbp);
+       if (hfp < 1) {
+               int t;
+               /* we need to take cycles from hbp */
+
+               t = 1 - hfp;
+               hbp = max(hbp - t, 1);
+               hfp = dsi_hbl - (hss + hsa + hse + hbp);
+
+               if (hfp < 1 && hsa > 0) {
+                       /* we need to take cycles from hsa */
+                       t = 1 - hfp;
+                       hsa = max(hsa - t, 1);
+                       hfp = dsi_hbl - (hss + hsa + hse + hbp);
+               }
+       }
+
+       if (hfp < 1)
+               return false;
+
+       dsi_vm->hss = hss;
+       dsi_vm->hsa = hsa;
+       dsi_vm->hse = hse;
+       dsi_vm->hbp = hbp;
+       dsi_vm->hact = xres;
+       dsi_vm->hfp = hfp;
+
+       dsi_vm->vsa = req_vm->vsw;
+       dsi_vm->vbp = req_vm->vbp;
+       dsi_vm->vact = req_vm->y_res;
+       dsi_vm->vfp = req_vm->vfp;
+
+       dsi_vm->trans_mode = cfg->trans_mode;
+
+       dsi_vm->blanking_mode = 0;
+       dsi_vm->hsa_blanking_mode = 1;
+       dsi_vm->hfp_blanking_mode = 1;
+       dsi_vm->hbp_blanking_mode = 1;
+
+       dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on;
+       dsi_vm->window_sync = 4;
+
+       /* setup DISPC videomode */
+
+       dispc_vm = &ctx->dispc_vm;
+       *dispc_vm = *req_vm;
+       dispc_vm->pixel_clock = dispc_pck / 1000;
+
+       if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
+               hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
+                               req_pck_nom);
+               hsa = max(hsa, 1);
+       } else {
+               hsa = 1;
+       }
+
+       hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom);
+       hbp = max(hbp, 1);
+
+       hfp = dispc_hbl - hsa - hbp;
+       if (hfp < 1) {
+               int t;
+               /* we need to take cycles from hbp */
+
+               t = 1 - hfp;
+               hbp = max(hbp - t, 1);
+               hfp = dispc_hbl - hsa - hbp;
+
+               if (hfp < 1) {
+                       /* we need to take cycles from hsa */
+                       t = 1 - hfp;
+                       hsa = max(hsa - t, 1);
+                       hfp = dispc_hbl - hsa - hbp;
+               }
+       }
+
+       if (hfp < 1)
+               return false;
+
+       dispc_vm->hfp = hfp;
+       dispc_vm->hsw = hsa;
+       dispc_vm->hbp = hbp;
+
+       return true;
+}
+
+
+static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+               unsigned long pck, void *data)
+{
+       struct dsi_clk_calc_ctx *ctx = data;
+
+       ctx->dispc_cinfo.lck_div = lckd;
+       ctx->dispc_cinfo.pck_div = pckd;
+       ctx->dispc_cinfo.lck = lck;
+       ctx->dispc_cinfo.pck = pck;
+
+       if (dsi_vm_calc_blanking(ctx) == false)
+               return false;
+
+#ifdef PRINT_VERBOSE_VM_TIMINGS
+       print_dispc_vm("dispc", &ctx->dispc_vm);
+       print_dsi_vm("dsi  ", &ctx->dsi_vm);
+       print_dispc_vm("req  ", ctx->config->timings);
+       print_dsi_dispc_vm("act  ", &ctx->dsi_vm);
+#endif
+
+       return true;
+}
+
+static bool dsi_vm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+               void *data)
+{
+       struct dsi_clk_calc_ctx *ctx = data;
+       unsigned long pck_max;
+
+       ctx->dsi_cinfo.regm_dispc = regm_dispc;
+       ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
+
+       /*
+        * In burst mode we can let the dispc pck be arbitrarily high, but it
+        * limits our scaling abilities. So for now, don't aim too high.
+        */
+
+       if (ctx->config->trans_mode == OMAP_DSS_DSI_BURST_MODE)
+               pck_max = ctx->req_pck_max + 10000000;
+       else
+               pck_max = ctx->req_pck_max;
+
+       return dispc_div_calc(dispc, ctx->req_pck_min, pck_max,
+                       dsi_vm_calc_dispc_cb, ctx);
+}
+
+static bool dsi_vm_calc_pll_cb(int regn, int regm, unsigned long fint,
+               unsigned long pll, void *data)
+{
+       struct dsi_clk_calc_ctx *ctx = data;
+
+       ctx->dsi_cinfo.regn = regn;
+       ctx->dsi_cinfo.regm = regm;
+       ctx->dsi_cinfo.fint = fint;
+       ctx->dsi_cinfo.clkin4ddr = pll;
+
+       return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min,
+                       dsi_vm_calc_hsdiv_cb, ctx);
+}
+
+static bool dsi_vm_calc(struct dsi_data *dsi,
+               const struct omap_dss_dsi_config *cfg,
+               struct dsi_clk_calc_ctx *ctx)
+{
+       const struct omap_video_timings *t = cfg->timings;
+       unsigned long clkin;
+       unsigned long pll_min;
+       unsigned long pll_max;
+       int ndl = dsi->num_lanes_used - 1;
+       int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+       unsigned long byteclk_min;
+
+       clkin = clk_get_rate(dsi->sys_clk);
+
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->dsidev = dsi->pdev;
+       ctx->config = cfg;
+
+       ctx->dsi_cinfo.clkin = clkin;
+
+       /* these limits should come from the panel driver */
+       ctx->req_pck_min = t->pixel_clock * 1000 - 1000;
+       ctx->req_pck_nom = t->pixel_clock * 1000;
+       ctx->req_pck_max = t->pixel_clock * 1000 + 1000;
+
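+       /*
+        * Minimum TX byte clock needed to carry the requested pixel rate
+        * over ndl lanes; the PLL must run at least 16x that and at least
+        * 4x the minimum HS clock.
+        */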
+       byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
+       pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
+
+       if (cfg->trans_mode == OMAP_DSS_DSI_BURST_MODE) {
+               pll_max = cfg->hs_clk_max * 4;
+       } else {
+               unsigned long byteclk_max;
+               byteclk_max = div64_u64((u64)ctx->req_pck_max * bitspp,
+                               ndl * 8);
+
+               pll_max = byteclk_max * 4 * 4;
+       }
+
+       return dsi_pll_calc(dsi->pdev, clkin,
+                       pll_min, pll_max,
+                       dsi_vm_calc_pll_cb, ctx);
 }
-EXPORT_SYMBOL(omapdss_dsi_set_operation_mode);
 
-void omapdss_dsi_set_videomode_timings(struct omap_dss_device *dssdev,
-               struct omap_dss_dsi_videomode_timings *timings)
+int omapdss_dsi_set_config(struct omap_dss_device *dssdev,
+               const struct omap_dss_dsi_config *config)
 {
        struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+       struct dsi_clk_calc_ctx ctx;
+       bool ok;
+       int r;
 
        mutex_lock(&dsi->lock);
 
-       dsi->vm_timings = *timings;
+       dsi->pix_fmt = config->pixel_format;
+       dsi->mode = config->mode;
+
+       if (config->mode == OMAP_DSS_DSI_VIDEO_MODE)
+               ok = dsi_vm_calc(dsi, config, &ctx);
+       else
+               ok = dsi_cm_calc(dsi, config, &ctx);
+
+       if (!ok) {
+               DSSERR("failed to find suitable DSI clock settings\n");
+               r = -EINVAL;
+               goto err;
+       }
+
+       dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo);
+
+       r = dsi_lp_clock_calc(&ctx.dsi_cinfo, config->lp_clk_min,
+                       config->lp_clk_max);
+       if (r) {
+               DSSERR("failed to find suitable DSI LP clock settings\n");
+               goto err;
+       }
+
+       dsi->user_dsi_cinfo = ctx.dsi_cinfo;
+       dsi->user_dispc_cinfo = ctx.dispc_cinfo;
+
+       dsi->timings = ctx.dispc_vm;
+       dsi->vm_timings = ctx.dsi_vm;
 
        mutex_unlock(&dsi->lock);
+
+       return 0;
+err:
+       mutex_unlock(&dsi->lock);
+
+       return r;
+}
+EXPORT_SYMBOL(omapdss_dsi_set_config);
+
+/*
+ * Return a hardcoded channel for the DSI output. This should work for
+ * current use cases, but it can later be extended to resolve the channel
+ * more dynamically or to take the channel as a user parameter.
+ */
+static enum omap_channel dsi_get_channel(int module_id)
+{
+       switch (omapdss_get_version()) {
+       case OMAPDSS_VER_OMAP24xx:
+               DSSWARN("DSI not supported\n");
+               return OMAP_DSS_CHANNEL_LCD;
+
+       case OMAPDSS_VER_OMAP34xx_ES1:
+       case OMAPDSS_VER_OMAP34xx_ES3:
+       case OMAPDSS_VER_OMAP3630:
+       case OMAPDSS_VER_AM35xx:
+               return OMAP_DSS_CHANNEL_LCD;
+
+       case OMAPDSS_VER_OMAP4430_ES1:
+       case OMAPDSS_VER_OMAP4430_ES2:
+       case OMAPDSS_VER_OMAP4:
+               switch (module_id) {
+               case 0:
+                       return OMAP_DSS_CHANNEL_LCD;
+               case 1:
+                       return OMAP_DSS_CHANNEL_LCD2;
+               default:
+                       DSSWARN("unsupported module id\n");
+                       return OMAP_DSS_CHANNEL_LCD;
+               }
+
+       case OMAPDSS_VER_OMAP5:
+               switch (module_id) {
+               case 0:
+                       return OMAP_DSS_CHANNEL_LCD;
+               case 1:
+                       return OMAP_DSS_CHANNEL_LCD3;
+               default:
+                       DSSWARN("unsupported module id\n");
+                       return OMAP_DSS_CHANNEL_LCD;
+               }
+
+       default:
+               DSSWARN("unsupported DSS version\n");
+               return OMAP_DSS_CHANNEL_LCD;
+       }
 }
-EXPORT_SYMBOL(omapdss_dsi_set_videomode_timings);
 
 static int __init dsi_init_display(struct omap_dss_device *dssdev)
 {
@@ -5073,7 +5347,7 @@ static int dsi_get_clocks(struct platform_device *dsidev)
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct clk *clk;
 
-       clk = clk_get(&dsidev->dev, "fck");
+       clk = devm_clk_get(&dsidev->dev, "fck");
        if (IS_ERR(clk)) {
                DSSERR("can't get fck\n");
                return PTR_ERR(clk);
@@ -5081,11 +5355,9 @@ static int dsi_get_clocks(struct platform_device *dsidev)
 
        dsi->dss_clk = clk;
 
-       clk = clk_get(&dsidev->dev, "sys_clk");
+       clk = devm_clk_get(&dsidev->dev, "sys_clk");
        if (IS_ERR(clk)) {
                DSSERR("can't get sys_clk\n");
-               clk_put(dsi->dss_clk);
-               dsi->dss_clk = NULL;
                return PTR_ERR(clk);
        }
 
@@ -5094,16 +5366,6 @@ static int dsi_get_clocks(struct platform_device *dsidev)
        return 0;
 }
 
-static void dsi_put_clocks(struct platform_device *dsidev)
-{
-       struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
-       if (dsi->dss_clk)
-               clk_put(dsi->dss_clk);
-       if (dsi->sys_clk)
-               clk_put(dsi->sys_clk);
-}
-
 static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
@@ -5188,6 +5450,8 @@ static void __init dsi_init_output(struct platform_device *dsidev)
                        OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
 
        out->type = OMAP_DISPLAY_TYPE_DSI;
+       out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
+       out->dispc_channel = dsi_get_channel(dsi->module_id);
 
        dss_register_output(out);
 }
@@ -5293,6 +5557,8 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
        else
                dsi->num_lanes_supported = 3;
 
+       dsi->line_buffer_size = dsi_get_line_buf_size(dsidev);
+
        dsi_init_output(dsidev);
 
        dsi_probe_pdata(dsidev);
@@ -5314,7 +5580,6 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
 
 err_runtime_get:
        pm_runtime_disable(&dsidev->dev);
-       dsi_put_clocks(dsidev);
        return r;
 }
 
@@ -5330,8 +5595,6 @@ static int __exit omap_dsihw_remove(struct platform_device *dsidev)
 
        pm_runtime_disable(&dsidev->dev);
 
-       dsi_put_clocks(dsidev);
-
        if (dsi->vdds_dsi_reg != NULL) {
                if (dsi->vdds_dsi_enabled) {
                        regulator_disable(dsi->vdds_dsi_reg);
index 054c2a22b3f1c650aca01037bd6d125d3a55235f..94f66f9f10a3664958623327fb59fe30de101f27 100644 (file)
@@ -473,6 +473,47 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo)
        return 0;
 }
 
+bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data)
+{
+       int fckd, fckd_start, fckd_stop;
+       unsigned long fck;
+       unsigned long fck_hw_max;
+       unsigned long fckd_hw_max;
+       unsigned long prate;
+       unsigned m;
+
+       if (dss.dpll4_m4_ck == NULL) {
+               /*
+                * TODO: dss1_fclk can be changed on OMAP2, but the available
+                * dividers are not continuous. We just use the pre-set rate for
+                * now.
+                */
+               fck = clk_get_rate(dss.dss_clk);
+               fckd = 1;
+               return func(fckd, fck, data);
+       }
+
+       fck_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+       fckd_hw_max = dss.feat->fck_div_max;
+
+       m = dss.feat->dss_fck_multiplier;
+       prate = dss_get_dpll4_rate();
+
+       fck_min = fck_min ? fck_min : 1;
+
+       fckd_start = min(prate * m / fck_min, fckd_hw_max);
+       fckd_stop = max(DIV_ROUND_UP(prate * m, fck_hw_max), 1ul);
+
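+       /*
+        * Scan the dividers from the lowest usable fck upwards and let the
+        * callback pick the first acceptable rate.
+        */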
+       for (fckd = fckd_start; fckd >= fckd_stop; --fckd) {
+               fck = prate / fckd * m;
+
+               if (func(fckd, fck, data))
+                       return true;
+       }
+
+       return false;
+}
+
 int dss_set_clock_div(struct dss_clock_info *cinfo)
 {
        if (dss.dpll4_m4_ck) {
@@ -482,7 +523,8 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
                prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
                DSSDBG("dpll4_m4 = %ld\n", prate);
 
-               r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
+               r = clk_set_rate(dss.dpll4_m4_ck,
+                               DIV_ROUND_UP(prate, cinfo->fck_div));
                if (r)
                        return r;
        } else {
@@ -492,7 +534,9 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
 
        dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
 
-       WARN_ONCE(dss.dss_clk_rate != cinfo->fck, "clk rate mismatch");
+       WARN_ONCE(dss.dss_clk_rate != cinfo->fck,
+                       "clk rate mismatch: %lu != %lu", dss.dss_clk_rate,
+                       cinfo->fck);
 
        DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
 
@@ -542,121 +586,6 @@ static int dss_setup_default_clock(void)
        return 0;
 }
 
-int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
-               struct dispc_clock_info *dispc_cinfo)
-{
-       unsigned long prate;
-       struct dss_clock_info best_dss;
-       struct dispc_clock_info best_dispc;
-
-       unsigned long fck, max_dss_fck;
-
-       u16 fck_div;
-
-       int match = 0;
-       int min_fck_per_pck;
-
-       prate = dss_get_dpll4_rate();
-
-       max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
-
-       fck = clk_get_rate(dss.dss_clk);
-       if (req_pck == dss.cache_req_pck && prate == dss.cache_prate &&
-               dss.cache_dss_cinfo.fck == fck) {
-               DSSDBG("dispc clock info found from cache.\n");
-               *dss_cinfo = dss.cache_dss_cinfo;
-               *dispc_cinfo = dss.cache_dispc_cinfo;
-               return 0;
-       }
-
-       min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
-
-       if (min_fck_per_pck &&
-               req_pck * min_fck_per_pck > max_dss_fck) {
-               DSSERR("Requested pixel clock not possible with the current "
-                               "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
-                               "the constraint off.\n");
-               min_fck_per_pck = 0;
-       }
-
-retry:
-       memset(&best_dss, 0, sizeof(best_dss));
-       memset(&best_dispc, 0, sizeof(best_dispc));
-
-       if (dss.dpll4_m4_ck == NULL) {
-               struct dispc_clock_info cur_dispc;
-               /* XXX can we change the clock on omap2? */
-               fck = clk_get_rate(dss.dss_clk);
-               fck_div = 1;
-
-               dispc_find_clk_divs(req_pck, fck, &cur_dispc);
-               match = 1;
-
-               best_dss.fck = fck;
-               best_dss.fck_div = fck_div;
-
-               best_dispc = cur_dispc;
-
-               goto found;
-       } else {
-               for (fck_div = dss.feat->fck_div_max; fck_div > 0; --fck_div) {
-                       struct dispc_clock_info cur_dispc;
-
-                       fck = prate / fck_div * dss.feat->dss_fck_multiplier;
-
-                       if (fck > max_dss_fck)
-                               continue;
-
-                       if (min_fck_per_pck &&
-                                       fck < req_pck * min_fck_per_pck)
-                               continue;
-
-                       match = 1;
-
-                       dispc_find_clk_divs(req_pck, fck, &cur_dispc);
-
-                       if (abs(cur_dispc.pck - req_pck) <
-                                       abs(best_dispc.pck - req_pck)) {
-
-                               best_dss.fck = fck;
-                               best_dss.fck_div = fck_div;
-
-                               best_dispc = cur_dispc;
-
-                               if (cur_dispc.pck == req_pck)
-                                       goto found;
-                       }
-               }
-       }
-
-found:
-       if (!match) {
-               if (min_fck_per_pck) {
-                       DSSERR("Could not find suitable clock settings.\n"
-                                       "Turning FCK/PCK constraint off and"
-                                       "trying again.\n");
-                       min_fck_per_pck = 0;
-                       goto retry;
-               }
-
-               DSSERR("Could not find suitable clock settings.\n");
-
-               return -EINVAL;
-       }
-
-       if (dss_cinfo)
-               *dss_cinfo = best_dss;
-       if (dispc_cinfo)
-               *dispc_cinfo = best_dispc;
-
-       dss.cache_req_pck = req_pck;
-       dss.cache_prate = prate;
-       dss.cache_dss_cinfo = best_dss;
-       dss.cache_dispc_cinfo = best_dispc;
-
-       return 0;
-}
-
 void dss_set_venc_output(enum omap_dss_venc_type type)
 {
        int l = 0;
@@ -767,13 +696,11 @@ int dss_dpi_select_source(enum omap_channel channel)
 static int dss_get_clocks(void)
 {
        struct clk *clk;
-       int r;
 
-       clk = clk_get(&dss.pdev->dev, "fck");
+       clk = devm_clk_get(&dss.pdev->dev, "fck");
        if (IS_ERR(clk)) {
                DSSERR("can't get clock fck\n");
-               r = PTR_ERR(clk);
-               goto err;
+               return PTR_ERR(clk);
        }
 
        dss.dss_clk = clk;
@@ -782,8 +709,7 @@ static int dss_get_clocks(void)
                clk = clk_get(NULL, dss.feat->clk_name);
                if (IS_ERR(clk)) {
                        DSSERR("Failed to get %s\n", dss.feat->clk_name);
-                       r = PTR_ERR(clk);
-                       goto err;
+                       return PTR_ERR(clk);
                }
        } else {
                clk = NULL;
@@ -792,21 +718,12 @@ static int dss_get_clocks(void)
        dss.dpll4_m4_ck = clk;
 
        return 0;
-
-err:
-       if (dss.dss_clk)
-               clk_put(dss.dss_clk);
-       if (dss.dpll4_m4_ck)
-               clk_put(dss.dpll4_m4_ck);
-
-       return r;
 }
 
 static void dss_put_clocks(void)
 {
        if (dss.dpll4_m4_ck)
                clk_put(dss.dpll4_m4_ck);
-       clk_put(dss.dss_clk);
 }
 
 static int dss_runtime_get(void)
index 610c8e563daa2033750c3eec12f3328ad7db38e4..faaf35857b0e5a6abd36f526d84f0660a70420cf 100644 (file)
@@ -268,8 +268,9 @@ void dss_set_dac_pwrdn_bgz(bool enable);
 unsigned long dss_get_dpll4_rate(void);
 int dss_calc_clock_rates(struct dss_clock_info *cinfo);
 int dss_set_clock_div(struct dss_clock_info *cinfo);
-int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
-               struct dispc_clock_info *dispc_cinfo);
+
+typedef bool (*dss_div_calc_func)(int fckd, unsigned long fck, void *data);
+bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data);
 
 /* SDI */
 int sdi_init_platform_driver(void) __init;
@@ -292,12 +293,21 @@ void dsi_dump_clocks(struct seq_file *s);
 void dsi_irq_handler(void);
 u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
 
+unsigned long dsi_get_pll_clkin(struct platform_device *dsidev);
+
+typedef bool (*dsi_pll_calc_func)(int regn, int regm, unsigned long fint,
+               unsigned long pll, void *data);
+typedef bool (*dsi_hsdiv_calc_func)(int regm_dispc, unsigned long dispc,
+               void *data);
+bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
+               unsigned long out_min, dsi_hsdiv_calc_func func, void *data);
+bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
+               unsigned long pll_min, unsigned long pll_max,
+               dsi_pll_calc_func func, void *data);
+
 unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
 int dsi_pll_set_clock_div(struct platform_device *dsidev,
                struct dsi_clock_info *cinfo);
-int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
-               unsigned long req_pck, struct dsi_clock_info *cinfo,
-               struct dispc_clock_info *dispc_cinfo);
 int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
                bool enable_hsdiv);
 void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
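The prototypes added above replace the old single-shot dsi_pll_calc_clock_div_pck() with composable helpers. Presumably they are meant to nest, in the same way the SDI changes later in this patch chain dss_div_calc() into dispc_div_calc(): the PLL callback tries each (regn, regm) candidate, forwards the resulting PLL rate to dsi_hsdiv_calc(), whose callback in turn forwards each HSDIV output to dispc_div_calc(). A hedged sketch under that assumption, using only the prototypes above; the ex_* names, the ctx struct, and the choice of pck_min as the HSDIV lower bound are hypothetical:

	struct ex_dsi_clk_ctx {
		struct platform_device *dsidev;
		unsigned long pck_min, pck_max;
	};

	static bool ex_dispc_cb(int lckd, int pckd, unsigned long lck,
			unsigned long pck, void *data)
	{
		/* record lckd/pckd/lck/pck in the caller's context and accept */
		return true;
	}

	static bool ex_hsdiv_cb(int regm_dispc, unsigned long dispc, void *data)
	{
		struct ex_dsi_clk_ctx *ctx = data;

		return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
				ex_dispc_cb, ctx);
	}

	static bool ex_pll_cb(int regn, int regm, unsigned long fint,
			unsigned long pll, void *data)
	{
		struct ex_dsi_clk_ctx *ctx = data;

		/* pck_min as out_min is a placeholder lower bound */
		return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->pck_min,
				ex_hsdiv_cb, ctx);
	}

	/*
	 * Top level (pll_min/pll_max are caller-chosen limits):
	 *	dsi_pll_calc(dsidev, dsi_get_pll_clkin(dsidev),
	 *			pll_min, pll_max, ex_pll_cb, &ctx);
	 */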
@@ -328,14 +338,6 @@ static inline int dsi_pll_set_clock_div(struct platform_device *dsidev,
        WARN("%s: DSI not compiled in\n", __func__);
        return -ENODEV;
 }
-static inline int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
-               unsigned long req_pck,
-               struct dsi_clock_info *dsi_cinfo,
-               struct dispc_clock_info *dispc_cinfo)
-{
-       WARN("%s: DSI not compiled in\n", __func__);
-       return -ENODEV;
-}
 static inline int dsi_pll_init(struct platform_device *dsidev,
                bool enable_hsclk, bool enable_hsdiv)
 {
@@ -376,11 +378,15 @@ void dispc_enable_fifomerge(bool enable);
 void dispc_enable_gamma_table(bool enable);
 void dispc_set_loadmode(enum omap_dss_load_mode mode);
 
+typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck,
+               unsigned long pck, void *data);
+bool dispc_div_calc(unsigned long dispc,
+               unsigned long pck_min, unsigned long pck_max,
+               dispc_div_calc_func func, void *data);
+
 bool dispc_mgr_timings_ok(enum omap_channel channel,
                const struct omap_video_timings *timings);
 unsigned long dispc_fclk_rate(void);
-void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck,
-               struct dispc_clock_info *cinfo);
 int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
                struct dispc_clock_info *cinfo);
 
index d7d66ef5cb58098c6f80268651c0a277957fc2e0..77dbe0cfb34ca28c540e5c22c9a593b6ae84018d 100644 (file)
@@ -202,12 +202,10 @@ static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = {
 
 static const enum omap_dss_output_id omap4_dss_supported_outputs[] = {
        /* OMAP_DSS_CHANNEL_LCD */
-       OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
-       OMAP_DSS_OUTPUT_DSI1,
+       OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1,
 
        /* OMAP_DSS_CHANNEL_DIGIT */
-       OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI |
-       OMAP_DSS_OUTPUT_DPI,
+       OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI,
 
        /* OMAP_DSS_CHANNEL_LCD2 */
        OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
@@ -416,7 +414,7 @@ static const char * const omap5_dss_clk_source_names[] = {
 };
 
 static const struct dss_param_range omap2_dss_param_range[] = {
-       [FEAT_PARAM_DSS_FCK]                    = { 0, 173000000 },
+       [FEAT_PARAM_DSS_FCK]                    = { 0, 133000000 },
        [FEAT_PARAM_DSS_PCD]                    = { 2, 255 },
        [FEAT_PARAM_DSIPLL_REGN]                = { 0, 0 },
        [FEAT_PARAM_DSIPLL_REGM]                = { 0, 0 },
@@ -461,15 +459,15 @@ static const struct dss_param_range omap4_dss_param_range[] = {
 };
 
 static const struct dss_param_range omap5_dss_param_range[] = {
-       [FEAT_PARAM_DSS_FCK]                    = { 0, 200000000 },
+       [FEAT_PARAM_DSS_FCK]                    = { 0, 209250000 },
        [FEAT_PARAM_DSS_PCD]                    = { 1, 255 },
        [FEAT_PARAM_DSIPLL_REGN]                = { 0, (1 << 8) - 1 },
        [FEAT_PARAM_DSIPLL_REGM]                = { 0, (1 << 12) - 1 },
        [FEAT_PARAM_DSIPLL_REGM_DISPC]          = { 0, (1 << 5) - 1 },
        [FEAT_PARAM_DSIPLL_REGM_DSI]            = { 0, (1 << 5) - 1 },
-       [FEAT_PARAM_DSIPLL_FINT]                = { 500000, 2500000 },
+       [FEAT_PARAM_DSIPLL_FINT]                = { 150000, 52000000 },
        [FEAT_PARAM_DSIPLL_LPDIV]               = { 0, (1 << 13) - 1 },
-       [FEAT_PARAM_DSI_FCK]                    = { 0, 170000000 },
+       [FEAT_PARAM_DSI_FCK]                    = { 0, 209250000 },
        [FEAT_PARAM_DOWNSCALE]                  = { 1, 4 },
        [FEAT_PARAM_LINEWIDTH]                  = { 1, 2048 },
 };
index 72923645dcceddbaf303fdf601f165c6302cf346..79393099d50527a1909c5841735cd63a2eac7158 100644 (file)
@@ -472,17 +472,12 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
         * Input clock is predivided by N + 1
         * out put of which is reference clk
         */
-       if (dssdev->clocks.hdmi.regn == 0)
-               pi->regn = HDMI_DEFAULT_REGN;
-       else
-               pi->regn = dssdev->clocks.hdmi.regn;
+
+       pi->regn = HDMI_DEFAULT_REGN;
 
        refclk = clkin / pi->regn;
 
-       if (dssdev->clocks.hdmi.regm2 == 0)
-               pi->regm2 = HDMI_DEFAULT_REGM2;
-       else
-               pi->regm2 = dssdev->clocks.hdmi.regm2;
+       pi->regm2 = HDMI_DEFAULT_REGM2;
 
        /*
         * multiplier is pixel_clk/ref_clk
@@ -804,7 +799,7 @@ static int hdmi_get_clocks(struct platform_device *pdev)
 {
        struct clk *clk;
 
-       clk = clk_get(&pdev->dev, "sys_clk");
+       clk = devm_clk_get(&pdev->dev, "sys_clk");
        if (IS_ERR(clk)) {
                DSSERR("can't get sys_clk\n");
                return PTR_ERR(clk);
@@ -815,12 +810,6 @@ static int hdmi_get_clocks(struct platform_device *pdev)
        return 0;
 }
 
-static void hdmi_put_clocks(void)
-{
-       if (hdmi.sys_clk)
-               clk_put(hdmi.sys_clk);
-}
-
 #if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
 int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
 {
@@ -1017,8 +1006,6 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
        hdmi.ls_oe_gpio = priv->ls_oe_gpio;
        hdmi.hpd_gpio = priv->hpd_gpio;
 
-       dssdev->channel = OMAP_DSS_CHANNEL_DIGIT;
-
        r = hdmi_init_display(dssdev);
        if (r) {
                DSSERR("device %s init failed: %d\n", dssdev->name, r);
@@ -1051,6 +1038,8 @@ static void __init hdmi_init_output(struct platform_device *pdev)
        out->pdev = pdev;
        out->id = OMAP_DSS_OUTPUT_HDMI;
        out->type = OMAP_DISPLAY_TYPE_HDMI;
+       out->name = "hdmi.0";
+       out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
 
        dss_register_output(out);
 }
@@ -1097,23 +1086,19 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
        hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
        hdmi.ip_data.phy_offset = HDMI_PHY;
 
+       hdmi_init_output(pdev);
+
        r = hdmi_panel_init();
        if (r) {
                DSSERR("can't init panel\n");
-               goto err_panel_init;
+               return r;
        }
 
        dss_debugfs_create_file("hdmi", hdmi_dump_regs);
 
-       hdmi_init_output(pdev);
-
        hdmi_probe_pdata(pdev);
 
        return 0;
-
-err_panel_init:
-       hdmi_put_clocks();
-       return r;
 }
 
 static int __exit hdmi_remove_child(struct device *dev, void *data)
@@ -1135,8 +1120,6 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
 
        pm_runtime_disable(&pdev->dev);
 
-       hdmi_put_clocks();
-
        return 0;
 }
 
index 79dea1a1a7320a6f4198891b10172ee10f7afe6b..5214df63e0a9952b0332bfd81dcbbf4f95c77a1b 100644 (file)
@@ -113,6 +113,7 @@ struct omap_dss_output *omap_dss_get_output(enum omap_dss_output_id id)
 
        return NULL;
 }
+EXPORT_SYMBOL(omap_dss_get_output);
 
 static const struct dss_mgr_ops *dss_mgr_ops;
 
index e903dd3f54d9466e802126e3801ac0c86dc10d4c..1a691bb27547f3f8c5ebff21cf51063e7053fa0d 100644 (file)
@@ -1025,6 +1025,8 @@ static void __init rfbi_init_output(struct platform_device *pdev)
        out->pdev = pdev;
        out->id = OMAP_DSS_OUTPUT_DBI;
        out->type = OMAP_DISPLAY_TYPE_DBI;
+       out->name = "rfbi.0";
+       out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
 
        dss_register_output(out);
 }
index 62b5374ce438f3ff7e4db23d4ad5c6d5165456cd..e6baee2e84f8cb4b21672bf2e3eb3af01f6b9b20 100644 (file)
@@ -41,6 +41,72 @@ static struct {
        struct omap_dss_output output;
 } sdi;
 
+struct sdi_clk_calc_ctx {
+       unsigned long pck_min, pck_max;
+
+       struct dss_clock_info dss_cinfo;
+       struct dispc_clock_info dispc_cinfo;
+};
+
+static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
+               unsigned long pck, void *data)
+{
+       struct sdi_clk_calc_ctx *ctx = data;
+
+       ctx->dispc_cinfo.lck_div = lckd;
+       ctx->dispc_cinfo.pck_div = pckd;
+       ctx->dispc_cinfo.lck = lck;
+       ctx->dispc_cinfo.pck = pck;
+
+       return true;
+}
+
+static bool dpi_calc_dss_cb(int fckd, unsigned long fck, void *data)
+{
+       struct sdi_clk_calc_ctx *ctx = data;
+
+       ctx->dss_cinfo.fck = fck;
+       ctx->dss_cinfo.fck_div = fckd;
+
+       return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
+                       dpi_calc_dispc_cb, ctx);
+}
+
+static int sdi_calc_clock_div(unsigned long pclk,
+               struct dss_clock_info *dss_cinfo,
+               struct dispc_clock_info *dispc_cinfo)
+{
+       int i;
+       struct sdi_clk_calc_ctx ctx;
+
+       /*
+        * DSS fclk gives us very few possibilities, so finding a good pixel
+        * clock may not be possible. We try multiple times to find the clock,
+        * each time widening the pixel clock range we look for, up to
+        * +/- 1MHz.
+        */
+
+       for (i = 0; i < 10; ++i) {
+               bool ok;
+
+               memset(&ctx, 0, sizeof(ctx));
+               if (pclk > 1000 * i * i * i)
+                       ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu);
+               else
+                       ctx.pck_min = 0;
+               ctx.pck_max = pclk + 1000 * i * i * i;
+
+               ok = dss_div_calc(ctx.pck_min, dpi_calc_dss_cb, &ctx);
+               if (ok) {
+                       *dss_cinfo = ctx.dss_cinfo;
+                       *dispc_cinfo = ctx.dispc_cinfo;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
 static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
 {
        struct omap_overlay_manager *mgr = dssdev->output->manager;
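On the retry loop in sdi_calc_clock_div() added above: the half-width of the accepted pixel-clock window grows cubically with the attempt number, 1000 * i^3 Hz, so the ten attempts cover 0, 1 kHz, 8 kHz, ... up to 729 kHz around the requested rate, i.e. just under the "+/- 1MHz" mentioned in the comment. A small stand-alone illustration of that progression (user-space C, for reference only; not part of the patch):

	#include <stdio.h>

	/* Mirrors the window half-width used in the loop above: 1000 * i^3 Hz. */
	static unsigned long sdi_window_halfwidth(int i)
	{
		return 1000UL * i * i * i;
	}

	int main(void)
	{
		for (int i = 0; i < 10; ++i)
			printf("attempt %d: +/- %lu Hz\n", i,
					sdi_window_halfwidth(i));
		return 0;
	}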
@@ -88,7 +154,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
        t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
        t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
 
-       r = dss_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo);
+       r = sdi_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo);
        if (r)
                goto err_calc_clock_div;
 
@@ -278,6 +344,8 @@ static void __init sdi_init_output(struct platform_device *pdev)
        out->pdev = pdev;
        out->id = OMAP_DSS_OUTPUT_SDI;
        out->type = OMAP_DISPLAY_TYPE_SDI;
+       out->name = "sdi.0";
+       out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
 
        dss_register_output(out);
 }
index 006caf3cb509778d472d0a28d528cc3bde16748e..17764d1363983ca178f85a89fe5e9cf2d044425e 100644 (file)
@@ -519,10 +519,6 @@ int omapdss_venc_display_enable(struct omap_dss_device *dssdev)
                goto err0;
        }
 
-       if (dssdev->platform_enable)
-               dssdev->platform_enable(dssdev);
-
-
        r = venc_power_on(dssdev);
        if (r)
                goto err1;
@@ -533,8 +529,6 @@ int omapdss_venc_display_enable(struct omap_dss_device *dssdev)
 
        return 0;
 err1:
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
        omap_dss_stop_device(dssdev);
 err0:
        mutex_unlock(&venc.venc_lock);
@@ -551,9 +545,6 @@ void omapdss_venc_display_disable(struct omap_dss_device *dssdev)
 
        omap_dss_stop_device(dssdev);
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
-
        mutex_unlock(&venc.venc_lock);
 }
 
@@ -721,7 +712,7 @@ static int venc_get_clocks(struct platform_device *pdev)
        struct clk *clk;
 
        if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
-               clk = clk_get(&pdev->dev, "tv_dac_clk");
+               clk = devm_clk_get(&pdev->dev, "tv_dac_clk");
                if (IS_ERR(clk)) {
                        DSSERR("can't get tv_dac_clk\n");
                        return PTR_ERR(clk);
@@ -735,12 +726,6 @@ static int venc_get_clocks(struct platform_device *pdev)
        return 0;
 }
 
-static void venc_put_clocks(void)
-{
-       if (venc.tv_dac_clk)
-               clk_put(venc.tv_dac_clk);
-}
-
 static struct omap_dss_device * __init venc_find_dssdev(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
@@ -786,8 +771,6 @@ static void __init venc_probe_pdata(struct platform_device *vencdev)
 
        dss_copy_device_pdata(dssdev, plat_dssdev);
 
-       dssdev->channel = OMAP_DSS_CHANNEL_DIGIT;
-
        r = venc_init_display(dssdev);
        if (r) {
                DSSERR("device %s init failed: %d\n", dssdev->name, r);
@@ -819,6 +802,8 @@ static void __init venc_init_output(struct platform_device *pdev)
        out->pdev = pdev;
        out->id = OMAP_DSS_OUTPUT_VENC;
        out->type = OMAP_DISPLAY_TYPE_VENC;
+       out->name = "venc.0";
+       out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
 
        dss_register_output(out);
 }
@@ -886,7 +871,6 @@ static int __init omap_venchw_probe(struct platform_device *pdev)
 err_panel_init:
 err_runtime_get:
        pm_runtime_disable(&pdev->dev);
-       venc_put_clocks();
        return r;
 }
 
@@ -904,7 +888,6 @@ static int __exit omap_venchw_remove(struct platform_device *pdev)
        venc_uninit_output(pdev);
 
        pm_runtime_disable(&pdev->dev);
-       venc_put_clocks();
 
        return 0;
 }
index ca585ef37f2528d36d0534200a911db95b07baff..f38348ea33757eac5ed3af01e1145e8b246c721a 100644 (file)
@@ -2388,7 +2388,7 @@ static int omapfb_init_connections(struct omapfb2_device *fbdev,
                struct omap_dss_device *dssdev = fbdev->displays[i].dssdev;
                struct omap_dss_output *out = dssdev->output;
 
-               mgr = omap_dss_get_overlay_manager(dssdev->channel);
+               mgr = omap_dss_get_overlay_manager(out->dispc_channel);
 
                if (!mgr || !out)
                        continue;
index 63203acef812211cb17788454116fcbe77debdd7..0264704a52be3d9e02b23178e489b2e567ec4c56 100644 (file)
@@ -858,6 +858,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch)
        tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16)
            | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7);
        lcdc_write_chan(ch, LDHAJR, tmp);
+       lcdc_write_chan_mirror(ch, LDHAJR, tmp);
 }
 
 static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl)
index 5a32232cf7c15cdc05536ecb157eeb86048154e0..67af155cf60286f87c6dc1c576d72008d6e5c4fb 100644 (file)
@@ -182,7 +182,7 @@ config XEN_PRIVCMD
 
 config XEN_STUB
        bool "Xen stub drivers"
-       depends on XEN && X86_64
+       depends on XEN && X86_64 && BROKEN
        default n
        help
          Allow kernel to install stub drivers, to reserve space for Xen drivers,
index d17aa41a9041428fbff17a890b2af18b64422ef4..aa85881d17b23f7ff6425be0140b6aed0c63c942 100644 (file)
@@ -403,11 +403,23 @@ static void unmask_evtchn(int port)
 
        if (unlikely((cpu != cpu_from_evtchn(port))))
                do_hypercall = 1;
-       else
+       else {
+               /*
+                * Need to clear the mask before checking pending to
+                * avoid a race with an event becoming pending.
+                *
+                * EVTCHNOP_unmask will only trigger an upcall if the
+                * mask bit was set, so if a hypercall is needed
+                * remask the event.
+                */
+               sync_clear_bit(port, BM(&s->evtchn_mask[0]));
                evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
 
-       if (unlikely(evtchn_pending && xen_hvm_domain()))
-               do_hypercall = 1;
+               if (unlikely(evtchn_pending && xen_hvm_domain())) {
+                       sync_set_bit(port, BM(&s->evtchn_mask[0]));
+                       do_hypercall = 1;
+               }
+       }
 
        /* Slow path (hypercall) if this is a non-local port or if this is
         * an hvm domain and an event is pending (hvm domains don't have
@@ -418,8 +430,6 @@ static void unmask_evtchn(int port)
        } else {
                struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
-               sync_clear_bit(port, BM(&s->evtchn_mask[0]));
-
                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
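The unmask_evtchn() change above reorders the fast path: clear the mask bit first, then sample the pending bit, and re-set the mask when the slow path (hypercall) has to run, because EVTCHNOP_unmask only raises an upcall if the mask bit was still set. A simplified stand-alone model of that ordering with plain flags, purely illustrative and not the actual Xen shared-info bitmap code:

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy event-channel state: one mask bit and one pending bit. */
	struct toy_evtchn {
		bool masked;
		bool pending;
	};

	/*
	 * Mirror of the new ordering: unmask before sampling pending so an
	 * event arriving in between is not lost, and remask when a hypercall
	 * will be used so the hypervisor still sees the mask bit set and
	 * delivers the upcall itself.
	 */
	static bool toy_unmask(struct toy_evtchn *e, bool hvm_domain)
	{
		bool need_hypercall = false;

		e->masked = false;
		if (e->pending && hvm_domain) {
			e->masked = true;	/* let the hypercall re-deliver */
			need_hypercall = true;
		}

		return need_hypercall;
	}

	int main(void)
	{
		struct toy_evtchn e = { .masked = true, .pending = true };

		printf("hypercall needed: %d\n", toy_unmask(&e, true));
		return 0;
	}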
index 0ef7c4d40f86b9018ed2ae2ed0eda2071d4d7bfd..b04fb64c5a91ee2613b21c8d9c146ab16f82a941 100644 (file)
@@ -44,7 +44,7 @@ int xen_event_channel_op_compat(int cmd, void *arg)
 }
 EXPORT_SYMBOL_GPL(xen_event_channel_op_compat);
 
-int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
+int xen_physdev_op_compat(int cmd, void *arg)
 {
        struct physdev_op op;
        int rc;
@@ -78,3 +78,4 @@ int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
 
        return rc;
 }
+EXPORT_SYMBOL_GPL(xen_physdev_op_compat);
index f3278a6603ca3b0913280f4bfa5c7e07c0eff47a..90e34ac7e522dfcf2c5af269639f5f877d95ac3b 100644 (file)
@@ -505,6 +505,9 @@ static int __init xen_acpi_processor_init(void)
 
                pr = per_cpu(processors, i);
                perf = per_cpu_ptr(acpi_perf_data, i);
+               if (!pr)
+                       continue;
+
                pr->performance = perf;
                rc = acpi_processor_get_performance_info(pr);
                if (rc)
index 9204126f1560ced72321b267b96099acfe71b8dd..a2278ba7fb273a523476e0504388e49e48511093 100644 (file)
@@ -17,6 +17,7 @@
 #include <xen/events.h>
 #include <asm/xen/pci.h>
 #include <asm/xen/hypervisor.h>
+#include <xen/interface/physdev.h>
 #include "pciback.h"
 #include "conf_space.h"
 #include "conf_space_quirks.h"
@@ -85,37 +86,52 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
 static void pcistub_device_release(struct kref *kref)
 {
        struct pcistub_device *psdev;
+       struct pci_dev *dev;
        struct xen_pcibk_dev_data *dev_data;
 
        psdev = container_of(kref, struct pcistub_device, kref);
-       dev_data = pci_get_drvdata(psdev->dev);
+       dev = psdev->dev;
+       dev_data = pci_get_drvdata(dev);
 
-       dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
+       dev_dbg(&dev->dev, "pcistub_device_release\n");
 
-       xen_unregister_device_domain_owner(psdev->dev);
+       xen_unregister_device_domain_owner(dev);
 
        /* Call the reset function which does not take lock as this
         * is called from "unbind" which takes a device_lock mutex.
         */
-       __pci_reset_function_locked(psdev->dev);
-       if (pci_load_and_free_saved_state(psdev->dev,
-                                         &dev_data->pci_saved_state)) {
-               dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n");
-       } else
-               pci_restore_state(psdev->dev);
+       __pci_reset_function_locked(dev);
+       if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
+               dev_dbg(&dev->dev, "Could not reload PCI state\n");
+       else
+               pci_restore_state(dev);
+
+       if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+               struct physdev_pci_device ppdev = {
+                       .seg = pci_domain_nr(dev->bus),
+                       .bus = dev->bus->number,
+                       .devfn = dev->devfn
+               };
+               int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
+                                               &ppdev);
+
+               if (err)
+                       dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
+                                err);
+       }
 
        /* Disable the device */
-       xen_pcibk_reset_device(psdev->dev);
+       xen_pcibk_reset_device(dev);
 
        kfree(dev_data);
-       pci_set_drvdata(psdev->dev, NULL);
+       pci_set_drvdata(dev, NULL);
 
        /* Clean-up the device */
-       xen_pcibk_config_free_dyn_fields(psdev->dev);
-       xen_pcibk_config_free_dev(psdev->dev);
+       xen_pcibk_config_free_dyn_fields(dev);
+       xen_pcibk_config_free_dev(dev);
 
-       psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
-       pci_dev_put(psdev->dev);
+       dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+       pci_dev_put(dev);
 
        kfree(psdev);
 }
@@ -355,6 +371,19 @@ static int pcistub_init_device(struct pci_dev *dev)
        if (err)
                goto config_release;
 
+       if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+               struct physdev_pci_device ppdev = {
+                       .seg = pci_domain_nr(dev->bus),
+                       .bus = dev->bus->number,
+                       .devfn = dev->devfn
+               };
+
+               err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
+               if (err)
+                       dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
+                               err);
+       }
+
        /* We need the device active to save the state. */
        dev_dbg(&dev->dev, "save state of device\n");
        pci_save_state(dev);
index cbb09ce9730ac0494dc43fbef5d7d6f2ddd05b0b..5d8ee1319b5c1878c5b82f82bb150dfece7b2cf8 100644 (file)
@@ -82,7 +82,7 @@ fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
 fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
                                         qlogic/12160.bin
 fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
-fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw
+fw-shipped-$(CONFIG_INFINIBAND_QIB) += intel/sd7220.fw
 fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp
 fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \
                                     ess/maestro3_assp_minisrc.fw
diff --git a/firmware/intel/sd7220.fw.ihex b/firmware/intel/sd7220.fw.ihex
new file mode 100644 (file)
index 0000000..a336363
--- /dev/null
@@ -0,0 +1,513 @@
+:10000000020A29020A87E5E630E6047F0180027FC2
+:1000100000E5E230E4047E0180027E00EE5F6008CD
+:1000200053F9F7E4F5FE80087F0A121731120EA289
+:1000300075FC08E4F5FDE5E720E70343F908220035
+:1000400001201100042000755101E4F552F553F52B
+:1000500052F57E7F04020438C2360552E552D3942D
+:100060000C4005755201D23690070C7407F0A3744A
+:10007000FFF0E4F50CA3F0900714F0A3F0750B204B
+:10008000F509E4F508E508D39430400302040412AE
+:100090000006150BE50870047F0180027F00E5096A
+:1000A00070047E0180027E00EE5F6005121871D23E
+:1000B0003553E1F7E5084509FFE50B25E025E02488
+:1000C00083F582E43407F583EFF085E220E552D32F
+:1000D0009401400D1219F3E054A064407003020330
+:1000E000FB53F9F8909470E4F0E0F510AF09121E9C
+:1000F000B3AF08EF4408F582758380E0F529EF443B
+:1001000007121A3CF5225440D39400401EE52954AE
+:10011000F070211219F3E04480F0E52254306508B4
+:1001200070091219F3E054BFF080091219F37440FA
+:10013000F00203FB121A127583AE74FFF0AF087E53
+:1001400000EF4407F582E0FDE50B25E025E0248182
+:10015000F582E43407F583EDF090070EE004F0EF4C
+:100160004407F582758398E0F528121A23400C1293
+:1001700019F3E04401121A320203F6AF087E00744C
+:1001800080CDEFCD8D82F583E030E00A1219F3E0E7
+:100190004420F00203FB1219F3E054DFF0EE44AE0A
+:1001A000121A4330E4030203FB749E121A0520E086
+:1001B000030203FB8F828E83E020E0030203FB1225
+:1001C00019F3E04410F0E5E320E708E508121A3AD5
+:1001D0004404F0AF087E00EF121A3A20E2341219FC
+:1001E000F3E04408F0E5E430E6047D0180027D00A0
+:1001F000E57EC3940450047C0180027C00EC4D60D9
+:1002000005C2350203FBEE44D2121A434440F00209
+:1002100003FB1219F3E054F7F0121A127583D2E0BF
+:1002200054BFF0900714E004F0E57E7003757E0182
+:10023000AF087E00121A2340121219F3E044011293
+:1002400019F2E05402121A320203FB1219F3E044CD
+:10025000021219F2E054FEF0C235EE448A8F82F5A4
+:1002600083E0F517548F4440F07490FCE508440790
+:10027000FDF5828C83E0543F900702F0E054C08D7E
+:10028000828C83F07492121A05900703121A197463
+:1002900082121A05900704121A1974B4121A0590E2
+:1002A0000705121A197494FEE5084406121A0AF595
+:1002B0001030E004D2378002C237E510547F8F82BD
+:1002C0008E83F0304430121A035480D394004004DB
+:1002D000D2398002C2398F828E83E04480F0121AB4
+:1002E000035440D394004004D23A8002C23A8F8231
+:1002F0008E83E04440F07492FEE5084406121A0A28
+:1003000030E704D2388002C2388F828E83E0547F77
+:10031000F0121E46E4F50A20030280033043031264
+:1003200019952002028003304203120C8F303006F0
+:10033000121995120C8F120D471219F3E054FBF0AD
+:10034000E50AC39401404643E1081219F3E044046E
+:10035000F0E5E420E72A121A127583D2E05408D39C
+:10036000940040047F0180027F00E50AC3940140AD
+:10037000047E0180027E00EF5E6005121DD78017AB
+:10038000121A127583D2E04408F00203FB121A120B
+:100390007583D2E054F7F0121E467F0812173174AD
+:1003A0008EFE121A128E83E0F51054FEF0E5104412
+:1003B00001FFE508FDED4407F582EFF0E51054FE7E
+:1003C000FFED4407F582EF121A11758386E04410A1
+:1003D000121A11E04410F01219F3E054FD4401FF29
+:1003E0001219F3EF121A3230320CE5084408F58284
+:1003F0007583827405F0AF0B1218D774102508F5B9
+:10040000080200850509E509D3940750030200821C
+:10041000E57ED3940040047F0180027F00E57EC327
+:1004200094FA50047E0180027E00EE5F6002057E39
+:1004300030350B43E1017F0912173102005853E1B7
+:10044000FE0200588E6A8F6B8C6C8D6D756E017517
+:100450006F01757001E4F573F574F57590072FF071
+:10046000F53CF53EF546F547F53DF53FF56FE56F93
+:10047000700FE56B456A12072A758380743AF08025
+:100480000912072A758380741AF0E4F56EC3743F6D
+:10049000956EFF120865758382EFF0121A4D1208EF
+:1004A000C6E533F01208FA1208B140E1E56F700BAF
+:1004B00012072A7583807436F0800912072A758323
+:1004C000807416F0756E0112072A7583B4E56EF01C
+:1004D000121A4D743F256EF582E43400F583E5333E
+:1004E000F074BF256EF582E434001208B140D8E400
+:1004F000F570F546F547F56E1208FAF583E0FE1241
+:1005000008C6E07C002400FFEC3EFEAD3BD3EF9D2F
+:10051000EE9C50047B0180027B00E57070047A0140
+:1005200080027A00EB5A6006856E46757001D3EF43
+:100530009DEE9C50047F0180027F00E570B40104B1
+:100540007E0180027E00EF5E6003856E47056EE5EA
+:100550006E647F70A3E5466005E547B47E0385467B
+:1005600047E56F7008854676854777800EC3747FB0
+:100570009546F578C3747F9547F579E56F7037E553
+:10058000466547700C757301757401F53CF53D8047
+:1005900035E4F54EC3E5479546F53CC313F57125A3
+:1005A00046F572C3943F4005E4F53D8040C3743F77
+:1005B0009572F53D8037E5466547700F7573017597
+:1005C0007501F53EF53F754E018022E4F54EC3E519
+:1005D000479546F53EC313F5712546F572D3943F12
+:1005E0005005E4F53F8006E57224C1F53F056FE54F
+:1005F0006FC39402500302046EE56D456C70028077
+:1006000004E574457590072FF07F01E53E6004E531
+:100610003C7014E4F53CF53DF53EF53F1208D27010
+:1006200004F00206A4807AE53CC3953E4007E53C11
+:10063000953EFF8006C3E53E953CFFE576D3957970
+:10064000400585767A800385797AE577C395785079
+:100650000585777B800385787BE57BD3957A403071
+:10066000E57B957AF53CF53EC3E57B957A900719D5
+:10067000F0E53CC313F571257AF572C3943F40054C
+:10068000E4F53D801FC3743F9572F53DF53F80143E
+:10069000E4F53CF53E900719F01208D27003F080A3
+:1006A000037401F01208657583D0E0540FFEAD3C71
+:1006B00070027E07BE0F027E80EEFBEFD39B74803C
+:1006C000F898401FE4F53CF53E1208D27003F08024
+:1006D000127401F0E508FBEB4407F5827583D2E064
+:1006E0004410F0E508FBEB4409F58275839EEDF0BC
+:1006F000EB4407F5827583CAEDF01208657583CC6B
+:10070000EFF022E5084407F5827583BCE054F0F071
+:10071000E5084407F5827583BEE054F0F0E508442F
+:1007200007F5827583C0E054F0F0E5084407F582D0
+:1007300022F0900728E0FEA3E0F5828E8322854216
+:100740004285414185404074C02FF58274023EF5D8
+:1007500083E542F074E02FF58274023EF58322E5D2
+:100760004229FDE433FCE53CC39DEC6480F87480D1
+:100770009822F583E0900722541FFDE0FAA3E0F5EC
+:10078000828A83EDF022900722E0FCA3E0F5828CC0
+:100790008322900724FFED4407CFF0A3EFF02285DA
+:1007A0003838853939853A3A74C02FF58274023E5B
+:1007B000F58322900726FFED4407CFF0A3EFF02248
+:1007C000F074A02FF58274023EF5832274C02511C7
+:1007D000F582E43401F5832274002511F582E434B6
+:1007E00002F5832274602511F582E43403F5832237
+:1007F00074802511F582E43403F5832274E0251119
+:10080000F582E43403F5832274402511F582E43443
+:1008100006F5832274802FF58274023EF58322AFA1
+:10082000087E00EF4407F58222F583E5824407F550
+:1008300082E540F02274402511F582E43402F5830C
+:100840002274C02511F582E43403F5832274002557
+:1008500011F582E43406F5832274202511F582E433
+:100860003406F58322E508FDED4407F58222E541D3
+:10087000F0E56564014564227E00FB7A00FD7C00A2
+:100880002274202511F582E434022274A02511F58A
+:1008900082E4340322853E42853F418F4022853CDD
+:1008A00042853D418F402275453F900720E4F0A3EB
+:1008B00022F583E532F0056EE56EC3944022F0E543
+:1008C000084406F582227400256EF582E43400F5B2
+:1008D0008322E56D456C90072F22E4F9E53CD39522
+:1008E0003E2274802EF582E43402F583E02274A067
+:1008F0002EF582E43402F583E0227480256EF582C1
+:10090000E43400222542FDE433FC22854242854145
+:100910004185404022ED4C60030209E5EF4E7037FF
+:10092000900726120789E0FD1207CCEDF09007280A
+:10093000120789E0FD1207D8EDF0120786E0541F78
+:10094000FD120881F583EDF0900724120789E05429
+:100950001FFD120835EDF0EF64044E703790072646
+:10096000120789E0FD1207E4EDF0900728120789CD
+:10097000E0FD1207F0EDF0120786E0541FFD1208AB
+:100980008BF583EDF0900724120789E0541FFD12C8
+:100990000841EDF0EF64014E70047D0180027D009E
+:1009A000EF64024E70047F0180027F00EF4D60789B
+:1009B000900726120735E0FF1207FCEF120731E01F
+:1009C000FF120808EFF0900722120735E0541FFFCE
+:1009D00012084DEFF0900724120735E0541FFF1264
+:1009E0000859EFF0221207CCE4F01207D8E4F01215
+:1009F0000881F583E4F01208357414F01207E4E47A
+:100A0000F01207F0E4F012088BF583E4F0120841CD
+:100A10007414F01207FCE4F0120808E4F012084D18
+:100A2000E4F01208597414F02253F9F775FC10E43D
+:100A3000F5FD75FE30F5FFE5E720E70343F908E52E
+:100A4000E620E70B78FFE4F6D8FD53E6FE80097850
+:100A500008E4F6D8FD53E6FE758180E4F5A8D2A837
+:100A6000C2A9D2AFE5E220E50520E602800343E11A
+:100A700002E5E220E00E9000007F007E08E4F0A393
+:100A8000DFFCDEFA020ADB43FA01C0E0C0F0C083FB
+:100A9000C082C0D0121CE7D0D0D082D083D0F0D09A
+:100AA000E053FAFE32021B55E493A3F8E493A3F655
+:100AB00008DFF98029E493A3F85407240CC8C33352
+:100AC000C4540F4420C8834004F456800146F6DF26
+:100AD000E4800B010204081020408090003FE47E77
+:100AE000019360C1A3FF543F30E509541FFEE49316
+:100AF000A360010ECF54C025E060AD40B880FE8CED
+:100B0000648D658A668B67E4F569EF4E7003021D9C
+:100B100055E4F568E5674566703212072A758390DB
+:100B2000E41207297583C2E41207297583C4E4120D
+:100B30000870702912072A758392E41207297583B9
+:100B4000C6E41207297583C8E4F0801190072612C5
+:100B50000735E41208707005120732E4F0121D55D3
+:100B6000121EBFE5674566703312072A758390E54C
+:100B7000411207297583C2E5411207297583C41202
+:100B8000086E702912072A758392E54012072975AD
+:100B900083C6E5401207297583C8800E9007261288
+:100BA000073512086E7006120732E540F0AF697E15
+:100BB00000AD67AC6612044412072A7583CAE0D3FD
+:100BC0009400500C0568E568C394055003020B14AB
+:100BD000228C608D611208DA7420400D2FF582742A
+:100BE000033EF583E53EF0800B2FF58274033EF55E
+:100BF00083E53CF0E53CD3953E403CE561456070C3
+:100C000010E9120904E53E120768403B120895807E
+:100C100018E53EC39538401D853E38E53E600585A4
+:100C20003F3980038539398F3A120814E53E12079F
+:100C3000C0E53FF0228043E5614560701912075F0F
+:100C4000400512089E802712090B120814E5421273
+:100C500007C0E541F022E53CC39538401D853C388E
+:100C6000E53C6005853D3980038539398F3A1208A6
+:100C700014E53C1207C0E53DF02285383885393946
+:100C8000853A3A120814E5381207C0E539F0227F98
+:100C900006121731121D23120E04120E33E0440AFD
+:100CA000F0748EFE120E04120E0BEFF0E52830E504
+:100CB00003D38001C3400575142080037514081206
+:100CC0000E0475838AE514F0B4FF05751280800662
+:100CD000E514C313F512E4F516F57F121936121355
+:100CE000A3E50AC3940150090516E516C394144000
+:100CF000EAE5E420E728120E047583D2E05408D315
+:100D0000940040047F0180027F00E50AC394014003
+:100D1000047E0180027E00EF5E6003121DD7E57F36
+:100D2000C394114014120E047583D2E04480F0E5A0
+:100D3000E420E70F121DD7800A120E047583D2E05B
+:100D4000547FF0121D2322748A850882F583E517EB
+:100D5000F0120E3AE4F0900702E0120E177583903D
+:100D6000EFF07492FEE5084407FFF5828E83E054AD
+:100D7000C0FD900703E0543F4D8F828E83F09007B3
+:100D800004E0120E17758382EFF0900705E0FFED87
+:100D90004407F5827583B4EF120E03758380E05427
+:100DA000BFF030370A120E91758394E04480F03022
+:100DB000380A120E91758392E04480F0E52830E401
+:100DC0001A20390A120E04758388E0547FF0203A05
+:100DD0000A120E04758388E054BFF0748CFE120E64
+:100DE000048E83E0540F120E03758386E054BFF027
+:100DF000E5084406120DFD75838AE4F022F582753C
+:100E00008382E4F0E5084407F582228E83E0F51042
+:100E100054FEF0E5104401FFE508FDED4407F582BE
+:100E200022E515C45407FFE508FDED4408F5827579
+:100E3000838222758380E04440F0E5084408F5820F
+:100E400075838A22E51625E025E024AFF582E43497
+:100E50001AF583E493F50D2243E11043E18053E159
+:100E6000FD85E11022E51625E025E024B2F582E4B7
+:100E7000341AF583E49322855582855483E515F071
+:100E800022E5E25420D3940022E5E25440D39400BA
+:100E900022E5084406F58222FDE508FBEB4407F550
+:100EA000822253F9F775FE3022EF4E70261207CCDE
+:100EB000E0FD90072612077B1207D8E0FD90072877
+:100EC00012077B120881120772120835E09007247E
+:100ED000120778EF64044E70291207E4E0FD9007D2
+:100EE0002612077B1207F0E0FD90072812077B12FD
+:100EF000088B120772120841E0541FFD900724125C
+:100F0000077BEF64014E70047D0180027D00EF6479
+:100F1000024E70047F0180027F00EF4D60351207A2
+:100F2000FCE0FF900726120789EFF0120808E0FFA7
+:100F3000900728120789EFF012084DE0541FFF12A6
+:100F40000786EFF0120859E0541FFF90072412079C
+:100F500089EFF022E4F553120E8140047F018002F4
+:100F60007F00120E8940047E0180027E00EE4F70E9
+:100F700003020FF685E11043E10253E10F85E11012
+:100F8000E4F551E5E3543FF552120E89401DAD5290
+:100F9000AF51121118EF600885E11043E140800B5A
+:100FA00053E1BF120E5812000680FBE5E3543FF5F3
+:100FB00051E5E4543FF552120E81401DAD52AF5140
+:100FC000121118EF600885E11043E120800B53E116
+:100FD000DF120E5812000680FB120E8140047F01C2
+:100FE00080027F00120E8940047E0180027E00EEA6
+:100FF0004F6003120E5B22120E21EFF012109122AD
+:1010000002110002104002109000000000000000D9
+:1010100001200120E4F5571216BD121644E4121007
+:10102000561214B7900726120735E4120731E4F080
+:101030001210561214B7900726120735E541120711
+:1010400031E540F0AF577E00AD567C00120444AF4E
+:10105000567E000211EEFF900720A3E0FDE4F55656
+:10106000F540FEFCAB56FA1211517F0F7D18E4F5E6
+:1010700056F540FEFCAB56FA121541AF567E0012F3
+:101080001AFFE4FFF5567D1FF540FEFCAB56FA2231
+:1010900022E4F555E508FD74A0F556ED4407F55733
+:1010A000E52830E503D38001C340057F28EF8004A5
+:1010B0007F14EFC313F554E4F9120E1875838EE014
+:1010C000F510CEEFCEEED394004026E51054FE127C
+:1010D0000E9875838EEDF0E5104401FDEB4407F5A5
+:1010E00082EDF0855782855683E030E301091E804A
+:1010F000D4C234E9C395544002D2342202000622FD
+:10110000303011901000E493F510901010E493F536
+:101110001012109012115022E4FCC3ED9FFAEFF56B
+:101120008375820079FFE493CC6CCCA3D9F8DAF60E
+:10113000E5E230E4028CE5ED24FFFFEF7582FFF578
+:1011400083E4936C70037F01227F00222211000050
+:10115000228E588F598C5A8D5B8A5C8B5D755E012F
+:10116000E4F55FF560F56212072A7583D0E0FFC4ED
+:10117000540FF561121EA585595ED3E55E955BE5BA
+:101180005A12076B504B1207037583BCE0455E1281
+:1011900007297583BEE0455E1207297583C0E045C7
+:1011A0005EF0AF5FE560120878120AFFAF627E0062
+:1011B000AD5DAC5C120444E561AF5E7E00B4030536
+:1011C000121E218007AD5DAC5C121317055E021183
+:1011D0007A1207037583BCE045401207297583BE68
+:1011E000E045401207297583C0E04540F0228E5843
+:1011F0008F59755A017901755B01E4FB12072A7555
+:1012000083AEE0541AFF120865E0C4135407FEEFE2
+:10121000700CEE6535700790072FE0B4010DAF3507
+:101220007E00120EA9CFEBCF021E60E55964024585
+:101230005870047F0180027F00E559455870047E94
+:101240000180027E00EE4F602385414985404BE5D9
+:10125000594558702CAF5AFECDE9CDFCAB59AA5870
+:10126000120AFFAF5B7E00121E608015AF5B7E002E
+:10127000121E60900726120735E549120731E54B2B
+:10128000F0E4FDAF35FEFC120915228C648D651269
+:1012900008DA403CE56545647010120904C3E53E78
+:1012A000120769403B1208958018E53EC395384007
+:1012B0001D853E38E53E6005853F39800385393917
+:1012C0008F3A1207A8E53E120753E53FF022803B14
+:1012D000E5654564701112075F400512089E801F86
+:1012E00012073EE541F022E53CC39538401D853CA0
+:1012F00038E53C6005853D3980038539398F3A12E0
+:1013000007A8E53C120753E53DF02212079FE53898
+:10131000120753E539F0228C638D641208DA403CE1
+:10132000E56445637010120904C3E53E1207694085
+:101330003B1208958018E53EC39538401D853E3820
+:10134000E53E6005853F3980038539398F3A1207BC
+:10135000A8E53E120753E53FF022803BE564456374
+:10136000701112075F400512089E801F12073EE5AC
+:1013700041F022E53CC39538401D853C38E53C6092
+:1013800005853D3980038539398F3A1207A8E53C38
+:10139000120753E53DF02212079FE538120753E587
+:1013A00039F022E50DFEE5088E544405F555751516
+:1013B0000FF582120E7A1217A320310575150380DE
+:1013C0000375150BE50AC39401503812142020311F
+:1013D0000605150515800415151515E50AC39401B4
+:1013E0005021121420203104051580021515E50A3C
+:1013F000C39401500E120E771217A3203105051564
+:10140000120E77E515B408047F0180027F00E51510
+:10141000B407047E0180027E00EE4F6002057F2249
+:10142000855582855483E515F01217A32212072AE9
+:101430007583AE74FF120729E0541AF534E0C41323
+:101440005407F53524FE602424FE603C24047063B8
+:1014500075312DE508FD74B612079274BC90072211
+:1014600012079574901207B37492803C75313AE577
+:1014700008FD74BA12079274C09007221207B6745E
+:10148000C41207B374C88020753135E508FD74B8FF
+:1014900012079274BEFFED4407900722CFF0A3EF2E
+:1014A000F074C21207B374C6FFED4407A3CFF0A3D4
+:1014B000EFF022753401228E588F598C5A8D5B8A39
+:1014C0005C8B5D755E01E4F55F121EA585595ED3E8
+:1014D000E55E955BE55A12076B5057E55D455C701C
+:1014E0003012072A758392E55E1207297583C6E5D7
+:1014F0005E1207297583C8E55E120729758390E59A
+:101500005E1207297583C2E55E1207297583C480C0
+:1015100003120732E55EF0AF5F7E00AD5DAC5C129A
+:101520000444AF5E7E00AD5DAC5C120BD1055E0283
+:1015300014CFAB5DAA5CAD5BAC5AAF59AE58021B81
+:10154000FB8C5C8D5D8A5E8B5F756001E4F561F5F7
+:1015500062F563121EA58F60D3E560955DE55C12B0
+:10156000076B5061E55F455E702712072A7583B6E9
+:10157000E5601207297583B8E5601207297583BAFB
+:10158000E560F0AF617E00E56212087A120AFF8022
+:1015900019900724120735E56012072975838EE438
+:1015A0001207297401120729E4F0AF637E00AD5FD2
+:1015B000AC5E120444AF607E00AD5FAC5E12128B75
+:1015C00005600215582290114DE49390072EF012F9
+:1015D000081F7583AEE0541AF5347067EF4407F5C1
+:1015E000827583CEE0FF1313135407F536540FD3DF
+:1015F0009400400612142D121BA9E536540F24FE48
+:10160000600C14600C146019240370378010021EE3
+:1016100091121E9112072A7583CEE054EFF0021D3D
+:10162000AE121014E4F555121D850555E555C39409
+:101630000540F412072A7583CEE054C7120729E04B
+:101640004408F022E4F558F559AF08EF4407F58255
+:101650007583D0E0FDC4540FF55AEF4407F5827549
+:1016600083807401F0120821758382E545F0EF4410
+:1016700007F58275838A74FFF0121A4D12072A75D6
+:1016800083BCE054EF1207297583BEE054EF1207C4
+:10169000297583C0E054EF1207297583BCE044101C
+:1016A0001207297583BEE044101207297583C0E034
+:1016B0004410F0AF58E559120878020AFFE4F558D3
+:1016C0007D01F559AF35FEFC12091512072A758305
+:1016D000B674101207297583B87410120729758320
+:1016E000BA74101207297583BC7410120729758308
+:1016F000BE74101207297583C074101207297583F0
+:1017000090E41207297583C2E41207297583C4E4A3
+:10171000120729758392E41207297583C6E412071C
+:10172000297583C8E4F0AF58FEE55912087A020A19
+:10173000FFE5E230E46CE5E754C064407064E5091D
+:10174000C45430FEE50825E025E054C04EFEEF54B9
+:101750003F4EFDE52BAE2A7802C333CE33CED8F907
+:10176000F5828E83EDF0E52BAE2A7802C333CE33BB
+:10177000CED8F9FFF5828E83A3E5FEF08F828E83AB
+:10178000A3A3E5FDF08F828E83A3A3A3E5FCF0C3A2
+:10179000E52B94FAE52A94005008052BE52B7002FE
+:1017A000052A22E4FFE4F558F556F5577482FC1239
+:1017B0000E048C83E0F510547FF0E5104480120E87
+:1017C00098EDF07E0A120E047583A0E020E026DE7C
+:1017D000F40557E55770020556E5142401FDE4337E
+:1017E000FCD3E5579DE5569C40D9E50A942050026C
+:1017F000050A43E108C231120E047583A6E05512B2
+:1018000065127003D23122C23122900726E0FAA37A
+:10181000E0F5828A83E0F541E539C395414026E54C
+:10182000399541C39FEE12076B40047C0180027C16
+:1018300000E541643F60047B0180027B00EC5B605B
+:101840002905418028C3E5419539C39FEE12076BF6
+:1018500040047F0180027F00E54160047E01800238
+:101860007E00EF5E600415418003853941853A4072
+:1018700022E5E230E460E5E130E25BE50970047FF7
+:101880000180027F00E50870047E0180027E00EE88
+:101890005F604353F9F8E5E230E43BE5E130E22EE6
+:1018A00043FA0253FAFBE4F510909470E510F0E56A
+:1018B000E130E2E7909470E06510600343FA0405BC
+:1018C00010909470E510F070E612000680E153FA73
+:1018D000FD53FAFB80C0228F54120006E5E130E090
+:1018E000047F0180027F00E57ED3940540047E01E1
+:1018F00080027E00EE4F603D855411E5E220E1322A
+:1019000074CE121A0530E7047D0180027D008F82BB
+:101910008E83E030E6047F0180027F00EF5D70156A
+:101920001215C674CE121A0530E607E04480F04363
+:10193000F98012187122120E44E51625E025E024E4
+:10194000B0F582E4341AF583E493F50FE51625E04B
+:1019500025E024B1F582E4341AF583E493F50E1200
+:101960000E65F510E50F54F0120E1775838CEFF02D
+:10197000E50F30E00C120E04758386E04440F080E1
+:101980000A120E04758386E054BFF0120E9175831F
+:1019900082E50EF0227F05121731120E04120E336B
+:1019A0007402F0748EFE120E04120E0BEFF0751519
+:1019B00070120FF72034057515108003751550123D
+:1019C0000FF72034047410800274F02515F51512F9
+:1019D0000E21EFF0121091203417E5156430600CE1
+:1019E00074102515F515B48003E4F515120E21EFDA
+:1019F000F022F0E50B25E025E02482F582E43407AF
+:101A0000F583227488FEE5084407FFF5828E83E0A3
+:101A100022F0E5084407F58222F0E054C08F828E60
+:101A200083F022EF4407F582758386E05410D39447
+:101A30000022F0900715E004F0224406F582758339
+:101A40009EE022FEEF4407F5828E83E022E49007B9
+:101A50002AF0A3F012072A758382E0547F12072927
+:101A6000E04480F01210FC12081F7583A0E020E013
+:101A70001A90072BE004F0700690072AE004F0901B
+:101A8000072AE0B410E1A3E0B400DCEE44A6FCEFCA
+:101A90004407F5828C83E0F532EE44A8FEEF44075C
+:101AA000F5828E83E0F5332201201100042000909E
+:101AB00000200F9200210F9400220F9600230F9810
+:101AC00000240F9A00250F9C00260F9E00270FA0D0
+:101AD000012001A2012101A4012201A6012301A8E4
+:101AE000012401AA012501AC012601AE012701B0A4
+:101AF000012801B400280FB640280FB8612801CB97
+:101B0000EFCBCAEECA7F01E4FDEB4A7024E508F58D
+:101B10008274B6120829E508F58274B8120829E51E
+:101B200008F58274BA1208297E007C00120AFF8030
+:101B300012900726120735E541F090072412073569
+:101B4000E540F012072A75838EE41207297401120A
+:101B50000729E4F022E4F526F52753E1FEF52A757E
+:101B60002B01F5087F0112173130301C901AA9E4BF
+:101B700093F510901FF9E493F510900041E493F56C
+:101B800010901ECAE493F5107F02121731120F5401
+:101B90007F03121731120006E5E230E70912100048
+:101BA00030300312110002004712081F7583D0E085
+:101BB000C4540FFD7543017544FF1208AA7404F064
+:101BC000753B01ED14600C14600B14600F2403705E
+:101BD0000B800980001208A704F080061208A77481
+:101BE00004F0EE4482FEEF4407F5828E83E5451251
+:101BF00008BE758382E531F002114C8E608F611250
+:101C00001EA5E4FFCEEDCEEED39561E56012076B25
+:101C1000403974202EF582E43403F583E07003FF2D
+:101C200080261208E2FDC39F401ECFEDCFEB4A7025
+:101C30000B8D421208EEF5418E40800C1208E2F541
+:101C4000381208EEF5398E3A1E80BC22755801E52F
+:101C500035700C1207CCE0F54A1207D8E0F54CE5D8
+:101C600035B4040C1207E4E0F54A1207F0E0F54C35
+:101C7000E535B401047F0180027F00E535B402043C
+:101C80007E0180027E00EE4F600C1207FCE0F54AF8
+:101C9000120808E0F54C85414985404B22755B01EF
+:101CA000900724120735E0541FFFD3940250048F8D
+:101CB000588005EF24FEF558EFC394184005755978
+:101CC000188004EF04F55985435AAF587E00AD598A
+:101CD0007C00AB5B7A00121541AF5A7E0012180AE5
+:101CE000AF5B7E00021AFFE5E230E70E121003C27E
+:101CF000303030031210FF203328E5E730E70512BB
+:101D00000EA2800DE5FEC394205006120EA243F9E8
+:101D100008E5F230E70353F97FE5F15470D39400FE
+:101D200050D822120E04758380E4F0E508440712AF
+:101D30000DFD758384120E02758386120E02758363
+:101D40008CE054F3120E0375838E120E0275839489
+:101D5000E054FBF02212072A75838EE412072974DF
+:101D600001120729E41208BE75838CE04420120892
+:101D7000BEE054DFF07484850882F583E0547FF080
+:101D8000E04480F022755601E4FDF557AF35FEFCC6
+:101D9000120915121C9D121E7A121C4CAF577E00A0
+:101DA000AD567C00120444AF567E000211EE75560B
+:101DB00001E4FDF557AF35FEFC120915121C9D120A
+:101DC0001E7A121C4CAF577E00AD567C00120444A4
+:101DD000AF567E000211EEE4F516120E44FEE50841
+:101DE0004405FF120E658F828E83F00516E516C33B
+:101DF000941440E6E508120E2BE4F022E4F558F5C1
+:101E000059F55AFFFEAD58FC1209157F047E00AD4E
+:101E1000587C001209157F027E00AD587C00020933
+:101E200015E53C253EFCE5422400FBE433FAECC317
+:101E30009BEA12076B400B8C42E53D253FF5418F35
+:101E4000402212090B227484F5188508198519821D
+:101E5000851883E0547FF0E04480F0E04480F02275
+:101E6000EF4E700B12072A7583D2E054DFF0221276
+:101E7000072A7583D2E04420F02275580190072686
+:101E8000120735E0543FF541120732E0543FF54068
+:101E900022755602E4F557121DFCAF577E00AD5671
+:101EA0007C00020444E4F542F541F540F538F5398B
+:101EB000F53A22EF5407FFE5F954F84FF5F9227F80
+:101EC00001E4FE0F0EBEFFFB2201200001042000F2
+:101ED0000000000000000000000000000000000002
+:101EE00000000000000000000000000000000000F2
+:101EF00000000000000000000000000000000000E2
+:101F000000000000000000000000000000000000D1
+:101F100000000000000000000000000000000000C1
+:101F200000000000000000000000000000000000B1
+:101F300000000000000000000000000000000000A1
+:101F40000000000000000000000000000000000091
+:101F50000000000000000000000000000000000081
+:101F60000000000000000000000000000000000071
+:101F70000000000000000000000000000000000061
+:101F80000000000000000000000000000000000051
+:101F90000000000000000000000000000000000041
+:101FA0000000000000000000000000000000000031
+:101FB0000000000000000000000000000000000021
+:101FC0000000000000000000000000000000000011
+:101FD0000000000000000000000000000000000001
+:101FE00000000000000000000000000000000000F1
+:101FF000000000000000000001201100042000810A
+:00000001FF
diff --git a/firmware/qlogic/sd7220.fw.ihex b/firmware/qlogic/sd7220.fw.ihex
deleted file mode 100644 (file)
index a336363..0000000
+++ /dev/null
@@ -1,513 +0,0 @@
-:10000000020A29020A87E5E630E6047F0180027FC2
-:1000100000E5E230E4047E0180027E00EE5F6008CD
-:1000200053F9F7E4F5FE80087F0A121731120EA289
-:1000300075FC08E4F5FDE5E720E70343F908220035
-:1000400001201100042000755101E4F552F553F52B
-:1000500052F57E7F04020438C2360552E552D3942D
-:100060000C4005755201D23690070C7407F0A3744A
-:10007000FFF0E4F50CA3F0900714F0A3F0750B204B
-:10008000F509E4F508E508D39430400302040412AE
-:100090000006150BE50870047F0180027F00E5096A
-:1000A00070047E0180027E00EE5F6005121871D23E
-:1000B0003553E1F7E5084509FFE50B25E025E02488
-:1000C00083F582E43407F583EFF085E220E552D32F
-:1000D0009401400D1219F3E054A064407003020330
-:1000E000FB53F9F8909470E4F0E0F510AF09121E9C
-:1000F000B3AF08EF4408F582758380E0F529EF443B
-:1001000007121A3CF5225440D39400401EE52954AE
-:10011000F070211219F3E04480F0E52254306508B4
-:1001200070091219F3E054BFF080091219F37440FA
-:10013000F00203FB121A127583AE74FFF0AF087E53
-:1001400000EF4407F582E0FDE50B25E025E0248182
-:10015000F582E43407F583EDF090070EE004F0EF4C
-:100160004407F582758398E0F528121A23400C1293
-:1001700019F3E04401121A320203F6AF087E00744C
-:1001800080CDEFCD8D82F583E030E00A1219F3E0E7
-:100190004420F00203FB1219F3E054DFF0EE44AE0A
-:1001A000121A4330E4030203FB749E121A0520E086
-:1001B000030203FB8F828E83E020E0030203FB1225
-:1001C00019F3E04410F0E5E320E708E508121A3AD5
-:1001D0004404F0AF087E00EF121A3A20E2341219FC
-:1001E000F3E04408F0E5E430E6047D0180027D00A0
-:1001F000E57EC3940450047C0180027C00EC4D60D9
-:1002000005C2350203FBEE44D2121A434440F00209
-:1002100003FB1219F3E054F7F0121A127583D2E0BF
-:1002200054BFF0900714E004F0E57E7003757E0182
-:10023000AF087E00121A2340121219F3E044011293
-:1002400019F2E05402121A320203FB1219F3E044CD
-:10025000021219F2E054FEF0C235EE448A8F82F5A4
-:1002600083E0F517548F4440F07490FCE508440790
-:10027000FDF5828C83E0543F900702F0E054C08D7E
-:10028000828C83F07492121A05900703121A197463
-:1002900082121A05900704121A1974B4121A0590E2
-:1002A0000705121A197494FEE5084406121A0AF595
-:1002B0001030E004D2378002C237E510547F8F82BD
-:1002C0008E83F0304430121A035480D394004004DB
-:1002D000D2398002C2398F828E83E04480F0121AB4
-:1002E000035440D394004004D23A8002C23A8F8231
-:1002F0008E83E04440F07492FEE5084406121A0A28
-:1003000030E704D2388002C2388F828E83E0547F77
-:10031000F0121E46E4F50A20030280033043031264
-:1003200019952002028003304203120C8F303006F0
-:10033000121995120C8F120D471219F3E054FBF0AD
-:10034000E50AC39401404643E1081219F3E044046E
-:10035000F0E5E420E72A121A127583D2E05408D39C
-:10036000940040047F0180027F00E50AC3940140AD
-:10037000047E0180027E00EF5E6005121DD78017AB
-:10038000121A127583D2E04408F00203FB121A120B
-:100390007583D2E054F7F0121E467F0812173174AD
-:1003A0008EFE121A128E83E0F51054FEF0E5104412
-:1003B00001FFE508FDED4407F582EFF0E51054FE7E
-:1003C000FFED4407F582EF121A11758386E04410A1
-:1003D000121A11E04410F01219F3E054FD4401FF29
-:1003E0001219F3EF121A3230320CE5084408F58284
-:1003F0007583827405F0AF0B1218D774102508F5B9
-:10040000080200850509E509D3940750030200821C
-:10041000E57ED3940040047F0180027F00E57EC327
-:1004200094FA50047E0180027E00EE5F6002057E39
-:1004300030350B43E1017F0912173102005853E1B7
-:10044000FE0200588E6A8F6B8C6C8D6D756E017517
-:100450006F01757001E4F573F574F57590072FF071
-:10046000F53CF53EF546F547F53DF53FF56FE56F93
-:10047000700FE56B456A12072A758380743AF08025
-:100480000912072A758380741AF0E4F56EC3743F6D
-:10049000956EFF120865758382EFF0121A4D1208EF
-:1004A000C6E533F01208FA1208B140E1E56F700BAF
-:1004B00012072A7583807436F0800912072A758323
-:1004C000807416F0756E0112072A7583B4E56EF01C
-:1004D000121A4D743F256EF582E43400F583E5333E
-:1004E000F074BF256EF582E434001208B140D8E400
-:1004F000F570F546F547F56E1208FAF583E0FE1241
-:1005000008C6E07C002400FFEC3EFEAD3BD3EF9D2F
-:10051000EE9C50047B0180027B00E57070047A0140
-:1005200080027A00EB5A6006856E46757001D3EF43
-:100530009DEE9C50047F0180027F00E570B40104B1
-:100540007E0180027E00EF5E6003856E47056EE5EA
-:100550006E647F70A3E5466005E547B47E0385467B
-:1005600047E56F7008854676854777800EC3747FB0
-:100570009546F578C3747F9547F579E56F7037E553
-:10058000466547700C757301757401F53CF53D8047
-:1005900035E4F54EC3E5479546F53CC313F57125A3
-:1005A00046F572C3943F4005E4F53D8040C3743F77
-:1005B0009572F53D8037E5466547700F7573017597
-:1005C0007501F53EF53F754E018022E4F54EC3E519
-:1005D000479546F53EC313F5712546F572D3943F12
-:1005E0005005E4F53F8006E57224C1F53F056FE54F
-:1005F0006FC39402500302046EE56D456C70028077
-:1006000004E574457590072FF07F01E53E6004E531
-:100610003C7014E4F53CF53DF53EF53F1208D27010
-:1006200004F00206A4807AE53CC3953E4007E53C11
-:10063000953EFF8006C3E53E953CFFE576D3957970
-:10064000400585767A800385797AE577C395785079
-:100650000585777B800385787BE57BD3957A403071
-:10066000E57B957AF53CF53EC3E57B957A900719D5
-:10067000F0E53CC313F571257AF572C3943F40054C
-:10068000E4F53D801FC3743F9572F53DF53F80143E
-:10069000E4F53CF53E900719F01208D27003F080A3
-:1006A000037401F01208657583D0E0540FFEAD3C71
-:1006B00070027E07BE0F027E80EEFBEFD39B74803C
-:1006C000F898401FE4F53CF53E1208D27003F08024
-:1006D000127401F0E508FBEB4407F5827583D2E064
-:1006E0004410F0E508FBEB4409F58275839EEDF0BC
-:1006F000EB4407F5827583CAEDF01208657583CC6B
-:10070000EFF022E5084407F5827583BCE054F0F071
-:10071000E5084407F5827583BEE054F0F0E508442F
-:1007200007F5827583C0E054F0F0E5084407F582D0
-:1007300022F0900728E0FEA3E0F5828E8322854216
-:100740004285414185404074C02FF58274023EF5D8
-:1007500083E542F074E02FF58274023EF58322E5D2
-:100760004229FDE433FCE53CC39DEC6480F87480D1
-:100770009822F583E0900722541FFDE0FAA3E0F5EC
-:10078000828A83EDF022900722E0FCA3E0F5828CC0
-:100790008322900724FFED4407CFF0A3EFF02285DA
-:1007A0003838853939853A3A74C02FF58274023E5B
-:1007B000F58322900726FFED4407CFF0A3EFF02248
-:1007C000F074A02FF58274023EF5832274C02511C7
-:1007D000F582E43401F5832274002511F582E434B6
-:1007E00002F5832274602511F582E43403F5832237
-:1007F00074802511F582E43403F5832274E0251119
-:10080000F582E43403F5832274402511F582E43443
-:1008100006F5832274802FF58274023EF58322AFA1
-:10082000087E00EF4407F58222F583E5824407F550
-:1008300082E540F02274402511F582E43402F5830C
-:100840002274C02511F582E43403F5832274002557
-:1008500011F582E43406F5832274202511F582E433
-:100860003406F58322E508FDED4407F58222E541D3
-:10087000F0E56564014564227E00FB7A00FD7C00A2
-:100880002274202511F582E434022274A02511F58A
-:1008900082E4340322853E42853F418F4022853CDD
-:1008A00042853D418F402275453F900720E4F0A3EB
-:1008B00022F583E532F0056EE56EC3944022F0E543
-:1008C000084406F582227400256EF582E43400F5B2
-:1008D0008322E56D456C90072F22E4F9E53CD39522
-:1008E0003E2274802EF582E43402F583E02274A067
-:1008F0002EF582E43402F583E0227480256EF582C1
-:10090000E43400222542FDE433FC22854242854145
-:100910004185404022ED4C60030209E5EF4E7037FF
-:10092000900726120789E0FD1207CCEDF09007280A
-:10093000120789E0FD1207D8EDF0120786E0541F78
-:10094000FD120881F583EDF0900724120789E05429
-:100950001FFD120835EDF0EF64044E703790072646
-:10096000120789E0FD1207E4EDF0900728120789CD
-:10097000E0FD1207F0EDF0120786E0541FFD1208AB
-:100980008BF583EDF0900724120789E0541FFD12C8
-:100990000841EDF0EF64014E70047D0180027D009E
-:1009A000EF64024E70047F0180027F00EF4D60789B
-:1009B000900726120735E0FF1207FCEF120731E01F
-:1009C000FF120808EFF0900722120735E0541FFFCE
-:1009D00012084DEFF0900724120735E0541FFF1264
-:1009E0000859EFF0221207CCE4F01207D8E4F01215
-:1009F0000881F583E4F01208357414F01207E4E47A
-:100A0000F01207F0E4F012088BF583E4F0120841CD
-:100A10007414F01207FCE4F0120808E4F012084D18
-:100A2000E4F01208597414F02253F9F775FC10E43D
-:100A3000F5FD75FE30F5FFE5E720E70343F908E52E
-:100A4000E620E70B78FFE4F6D8FD53E6FE80097850
-:100A500008E4F6D8FD53E6FE758180E4F5A8D2A837
-:100A6000C2A9D2AFE5E220E50520E602800343E11A
-:100A700002E5E220E00E9000007F007E08E4F0A393
-:100A8000DFFCDEFA020ADB43FA01C0E0C0F0C083FB
-:100A9000C082C0D0121CE7D0D0D082D083D0F0D09A
-:100AA000E053FAFE32021B55E493A3F8E493A3F655
-:100AB00008DFF98029E493A3F85407240CC8C33352
-:100AC000C4540F4420C8834004F456800146F6DF26
-:100AD000E4800B010204081020408090003FE47E77
-:100AE000019360C1A3FF543F30E509541FFEE49316
-:100AF000A360010ECF54C025E060AD40B880FE8CED
-:100B0000648D658A668B67E4F569EF4E7003021D9C
-:100B100055E4F568E5674566703212072A758390DB
-:100B2000E41207297583C2E41207297583C4E4120D
-:100B30000870702912072A758392E41207297583B9
-:100B4000C6E41207297583C8E4F0801190072612C5
-:100B50000735E41208707005120732E4F0121D55D3
-:100B6000121EBFE5674566703312072A758390E54C
-:100B7000411207297583C2E5411207297583C41202
-:100B8000086E702912072A758392E54012072975AD
-:100B900083C6E5401207297583C8800E9007261288
-:100BA000073512086E7006120732E540F0AF697E15
-:100BB00000AD67AC6612044412072A7583CAE0D3FD
-:100BC0009400500C0568E568C394055003020B14AB
-:100BD000228C608D611208DA7420400D2FF582742A
-:100BE000033EF583E53EF0800B2FF58274033EF55E
-:100BF00083E53CF0E53CD3953E403CE561456070C3
-:100C000010E9120904E53E120768403B120895807E
-:100C100018E53EC39538401D853E38E53E600585A4
-:100C20003F3980038539398F3A120814E53E12079F
-:100C3000C0E53FF0228043E5614560701912075F0F
-:100C4000400512089E802712090B120814E5421273
-:100C500007C0E541F022E53CC39538401D853C388E
-:100C6000E53C6005853D3980038539398F3A1208A6
-:100C700014E53C1207C0E53DF02285383885393946
-:100C8000853A3A120814E5381207C0E539F0227F98
-:100C900006121731121D23120E04120E33E0440AFD
-:100CA000F0748EFE120E04120E0BEFF0E52830E504
-:100CB00003D38001C3400575142080037514081206
-:100CC0000E0475838AE514F0B4FF05751280800662
-:100CD000E514C313F512E4F516F57F121936121355
-:100CE000A3E50AC3940150090516E516C394144000
-:100CF000EAE5E420E728120E047583D2E05408D315
-:100D0000940040047F0180027F00E50AC394014003
-:100D1000047E0180027E00EF5E6003121DD7E57F36
-:100D2000C394114014120E047583D2E04480F0E5A0
-:100D3000E420E70F121DD7800A120E047583D2E05B
-:100D4000547FF0121D2322748A850882F583E517EB
-:100D5000F0120E3AE4F0900702E0120E177583903D
-:100D6000EFF07492FEE5084407FFF5828E83E054AD
-:100D7000C0FD900703E0543F4D8F828E83F09007B3
-:100D800004E0120E17758382EFF0900705E0FFED87
-:100D90004407F5827583B4EF120E03758380E05427
-:100DA000BFF030370A120E91758394E04480F03022
-:100DB000380A120E91758392E04480F0E52830E401
-:100DC0001A20390A120E04758388E0547FF0203A05
-:100DD0000A120E04758388E054BFF0748CFE120E64
-:100DE000048E83E0540F120E03758386E054BFF027
-:100DF000E5084406120DFD75838AE4F022F582753C
-:100E00008382E4F0E5084407F582228E83E0F51042
-:100E100054FEF0E5104401FFE508FDED4407F582BE
-:100E200022E515C45407FFE508FDED4408F5827579
-:100E3000838222758380E04440F0E5084408F5820F
-:100E400075838A22E51625E025E024AFF582E43497
-:100E50001AF583E493F50D2243E11043E18053E159
-:100E6000FD85E11022E51625E025E024B2F582E4B7
-:100E7000341AF583E49322855582855483E515F071
-:100E800022E5E25420D3940022E5E25440D39400BA
-:100E900022E5084406F58222FDE508FBEB4407F550
-:100EA000822253F9F775FE3022EF4E70261207CCDE
-:100EB000E0FD90072612077B1207D8E0FD90072877
-:100EC00012077B120881120772120835E09007247E
-:100ED000120778EF64044E70291207E4E0FD9007D2
-:100EE0002612077B1207F0E0FD90072812077B12FD
-:100EF000088B120772120841E0541FFD900724125C
-:100F0000077BEF64014E70047D0180027D00EF6479
-:100F1000024E70047F0180027F00EF4D60351207A2
-:100F2000FCE0FF900726120789EFF0120808E0FFA7
-:100F3000900728120789EFF012084DE0541FFF12A6
-:100F40000786EFF0120859E0541FFF90072412079C
-:100F500089EFF022E4F553120E8140047F018002F4
-:100F60007F00120E8940047E0180027E00EE4F70E9
-:100F700003020FF685E11043E10253E10F85E11012
-:100F8000E4F551E5E3543FF552120E89401DAD5290
-:100F9000AF51121118EF600885E11043E140800B5A
-:100FA00053E1BF120E5812000680FBE5E3543FF5F3
-:100FB00051E5E4543FF552120E81401DAD52AF5140
-:100FC000121118EF600885E11043E120800B53E116
-:100FD000DF120E5812000680FB120E8140047F01C2
-:100FE00080027F00120E8940047E0180027E00EEA6
-:100FF0004F6003120E5B22120E21EFF012109122AD
-:1010000002110002104002109000000000000000D9
-:1010100001200120E4F5571216BD121644E4121007
-:10102000561214B7900726120735E4120731E4F080
-:101030001210561214B7900726120735E541120711
-:1010400031E540F0AF577E00AD567C00120444AF4E
-:10105000567E000211EEFF900720A3E0FDE4F55656
-:10106000F540FEFCAB56FA1211517F0F7D18E4F5E6
-:1010700056F540FEFCAB56FA121541AF567E0012F3
-:101080001AFFE4FFF5567D1FF540FEFCAB56FA2231
-:1010900022E4F555E508FD74A0F556ED4407F55733
-:1010A000E52830E503D38001C340057F28EF8004A5
-:1010B0007F14EFC313F554E4F9120E1875838EE014
-:1010C000F510CEEFCEEED394004026E51054FE127C
-:1010D0000E9875838EEDF0E5104401FDEB4407F5A5
-:1010E00082EDF0855782855683E030E301091E804A
-:1010F000D4C234E9C395544002D2342202000622FD
-:10110000303011901000E493F510901010E493F536
-:101110001012109012115022E4FCC3ED9FFAEFF56B
-:101120008375820079FFE493CC6CCCA3D9F8DAF60E
-:10113000E5E230E4028CE5ED24FFFFEF7582FFF578
-:1011400083E4936C70037F01227F00222211000050
-:10115000228E588F598C5A8D5B8A5C8B5D755E012F
-:10116000E4F55FF560F56212072A7583D0E0FFC4ED
-:10117000540FF561121EA585595ED3E55E955BE5BA
-:101180005A12076B504B1207037583BCE0455E1281
-:1011900007297583BEE0455E1207297583C0E045C7
-:1011A0005EF0AF5FE560120878120AFFAF627E0062
-:1011B000AD5DAC5C120444E561AF5E7E00B4030536
-:1011C000121E218007AD5DAC5C121317055E021183
-:1011D0007A1207037583BCE045401207297583BE68
-:1011E000E045401207297583C0E04540F0228E5843
-:1011F0008F59755A017901755B01E4FB12072A7555
-:1012000083AEE0541AFF120865E0C4135407FEEFE2
-:10121000700CEE6535700790072FE0B4010DAF3507
-:101220007E00120EA9CFEBCF021E60E55964024585
-:101230005870047F0180027F00E559455870047E94
-:101240000180027E00EE4F602385414985404BE5D9
-:10125000594558702CAF5AFECDE9CDFCAB59AA5870
-:10126000120AFFAF5B7E00121E608015AF5B7E002E
-:10127000121E60900726120735E549120731E54B2B
-:10128000F0E4FDAF35FEFC120915228C648D651269
-:1012900008DA403CE56545647010120904C3E53E78
-:1012A000120769403B1208958018E53EC395384007
-:1012B0001D853E38E53E6005853F39800385393917
-:1012C0008F3A1207A8E53E120753E53FF022803B14
-:1012D000E5654564701112075F400512089E801F86
-:1012E00012073EE541F022E53CC39538401D853CA0
-:1012F00038E53C6005853D3980038539398F3A12E0
-:1013000007A8E53C120753E53DF02212079FE53898
-:10131000120753E539F0228C638D641208DA403CE1
-:10132000E56445637010120904C3E53E1207694085
-:101330003B1208958018E53EC39538401D853E3820
-:10134000E53E6005853F3980038539398F3A1207BC
-:10135000A8E53E120753E53FF022803BE564456374
-:10136000701112075F400512089E801F12073EE5AC
-:1013700041F022E53CC39538401D853C38E53C6092
-:1013800005853D3980038539398F3A1207A8E53C38
-:10139000120753E53DF02212079FE538120753E587
-:1013A00039F022E50DFEE5088E544405F555751516
-:1013B0000FF582120E7A1217A320310575150380DE
-:1013C0000375150BE50AC39401503812142020311F
-:1013D0000605150515800415151515E50AC39401B4
-:1013E0005021121420203104051580021515E50A3C
-:1013F000C39401500E120E771217A3203105051564
-:10140000120E77E515B408047F0180027F00E51510
-:10141000B407047E0180027E00EE4F6002057F2249
-:10142000855582855483E515F01217A32212072AE9
-:101430007583AE74FF120729E0541AF534E0C41323
-:101440005407F53524FE602424FE603C24047063B8
-:1014500075312DE508FD74B612079274BC90072211
-:1014600012079574901207B37492803C75313AE577
-:1014700008FD74BA12079274C09007221207B6745E
-:10148000C41207B374C88020753135E508FD74B8FF
-:1014900012079274BEFFED4407900722CFF0A3EF2E
-:1014A000F074C21207B374C6FFED4407A3CFF0A3D4
-:1014B000EFF022753401228E588F598C5A8D5B8A39
-:1014C0005C8B5D755E01E4F55F121EA585595ED3E8
-:1014D000E55E955BE55A12076B5057E55D455C701C
-:1014E0003012072A758392E55E1207297583C6E5D7
-:1014F0005E1207297583C8E55E120729758390E59A
-:101500005E1207297583C2E55E1207297583C480C0
-:1015100003120732E55EF0AF5F7E00AD5DAC5C129A
-:101520000444AF5E7E00AD5DAC5C120BD1055E0283
-:1015300014CFAB5DAA5CAD5BAC5AAF59AE58021B81
-:10154000FB8C5C8D5D8A5E8B5F756001E4F561F5F7
-:1015500062F563121EA58F60D3E560955DE55C12B0
-:10156000076B5061E55F455E702712072A7583B6E9
-:10157000E5601207297583B8E5601207297583BAFB
-:10158000E560F0AF617E00E56212087A120AFF8022
-:1015900019900724120735E56012072975838EE438
-:1015A0001207297401120729E4F0AF637E00AD5FD2
-:1015B000AC5E120444AF607E00AD5FAC5E12128B75
-:1015C00005600215582290114DE49390072EF012F9
-:1015D000081F7583AEE0541AF5347067EF4407F5C1
-:1015E000827583CEE0FF1313135407F536540FD3DF
-:1015F0009400400612142D121BA9E536540F24FE48
-:10160000600C14600C146019240370378010021EE3
-:1016100091121E9112072A7583CEE054EFF0021D3D
-:10162000AE121014E4F555121D850555E555C39409
-:101630000540F412072A7583CEE054C7120729E04B
-:101640004408F022E4F558F559AF08EF4407F58255
-:101650007583D0E0FDC4540FF55AEF4407F5827549
-:1016600083807401F0120821758382E545F0EF4410
-:1016700007F58275838A74FFF0121A4D12072A75D6
-:1016800083BCE054EF1207297583BEE054EF1207C4
-:10169000297583C0E054EF1207297583BCE044101C
-:1016A0001207297583BEE044101207297583C0E034
-:1016B0004410F0AF58E559120878020AFFE4F558D3
-:1016C0007D01F559AF35FEFC12091512072A758305
-:1016D000B674101207297583B87410120729758320
-:1016E000BA74101207297583BC7410120729758308
-:1016F000BE74101207297583C074101207297583F0
-:1017000090E41207297583C2E41207297583C4E4A3
-:10171000120729758392E41207297583C6E412071C
-:10172000297583C8E4F0AF58FEE55912087A020A19
-:10173000FFE5E230E46CE5E754C064407064E5091D
-:10174000C45430FEE50825E025E054C04EFEEF54B9
-:101750003F4EFDE52BAE2A7802C333CE33CED8F907
-:10176000F5828E83EDF0E52BAE2A7802C333CE33BB
-:10177000CED8F9FFF5828E83A3E5FEF08F828E83AB
-:10178000A3A3E5FDF08F828E83A3A3A3E5FCF0C3A2
-:10179000E52B94FAE52A94005008052BE52B7002FE
-:1017A000052A22E4FFE4F558F556F5577482FC1239
-:1017B0000E048C83E0F510547FF0E5104480120E87
-:1017C00098EDF07E0A120E047583A0E020E026DE7C
-:1017D000F40557E55770020556E5142401FDE4337E
-:1017E000FCD3E5579DE5569C40D9E50A942050026C
-:1017F000050A43E108C231120E047583A6E05512B2
-:1018000065127003D23122C23122900726E0FAA37A
-:10181000E0F5828A83E0F541E539C395414026E54C
-:10182000399541C39FEE12076B40047C0180027C16
-:1018300000E541643F60047B0180027B00EC5B605B
-:101840002905418028C3E5419539C39FEE12076BF6
-:1018500040047F0180027F00E54160047E01800238
-:101860007E00EF5E600415418003853941853A4072
-:1018700022E5E230E460E5E130E25BE50970047FF7
-:101880000180027F00E50870047E0180027E00EE88
-:101890005F604353F9F8E5E230E43BE5E130E22EE6
-:1018A00043FA0253FAFBE4F510909470E510F0E56A
-:1018B000E130E2E7909470E06510600343FA0405BC
-:1018C00010909470E510F070E612000680E153FA73
-:1018D000FD53FAFB80C0228F54120006E5E130E090
-:1018E000047F0180027F00E57ED3940540047E01E1
-:1018F00080027E00EE4F603D855411E5E220E1322A
-:1019000074CE121A0530E7047D0180027D008F82BB
-:101910008E83E030E6047F0180027F00EF5D70156A
-:101920001215C674CE121A0530E607E04480F04363
-:10193000F98012187122120E44E51625E025E024E4
-:10194000B0F582E4341AF583E493F50FE51625E04B
-:1019500025E024B1F582E4341AF583E493F50E1200
-:101960000E65F510E50F54F0120E1775838CEFF02D
-:10197000E50F30E00C120E04758386E04440F080E1
-:101980000A120E04758386E054BFF0120E9175831F
-:1019900082E50EF0227F05121731120E04120E336B
-:1019A0007402F0748EFE120E04120E0BEFF0751519
-:1019B00070120FF72034057515108003751550123D
-:1019C0000FF72034047410800274F02515F51512F9
-:1019D0000E21EFF0121091203417E5156430600CE1
-:1019E00074102515F515B48003E4F515120E21EFDA
-:1019F000F022F0E50B25E025E02482F582E43407AF
-:101A0000F583227488FEE5084407FFF5828E83E0A3
-:101A100022F0E5084407F58222F0E054C08F828E60
-:101A200083F022EF4407F582758386E05410D39447
-:101A30000022F0900715E004F0224406F582758339
-:101A40009EE022FEEF4407F5828E83E022E49007B9
-:101A50002AF0A3F012072A758382E0547F12072927
-:101A6000E04480F01210FC12081F7583A0E020E013
-:101A70001A90072BE004F0700690072AE004F0901B
-:101A8000072AE0B410E1A3E0B400DCEE44A6FCEFCA
-:101A90004407F5828C83E0F532EE44A8FEEF44075C
-:101AA000F5828E83E0F5332201201100042000909E
-:101AB00000200F9200210F9400220F9600230F9810
-:101AC00000240F9A00250F9C00260F9E00270FA0D0
-:101AD000012001A2012101A4012201A6012301A8E4
-:101AE000012401AA012501AC012601AE012701B0A4
-:101AF000012801B400280FB640280FB8612801CB97
-:101B0000EFCBCAEECA7F01E4FDEB4A7024E508F58D
-:101B10008274B6120829E508F58274B8120829E51E
-:101B200008F58274BA1208297E007C00120AFF8030
-:101B300012900726120735E541F090072412073569
-:101B4000E540F012072A75838EE41207297401120A
-:101B50000729E4F022E4F526F52753E1FEF52A757E
-:101B60002B01F5087F0112173130301C901AA9E4BF
-:101B700093F510901FF9E493F510900041E493F56C
-:101B800010901ECAE493F5107F02121731120F5401
-:101B90007F03121731120006E5E230E70912100048
-:101BA00030300312110002004712081F7583D0E085
-:101BB000C4540FFD7543017544FF1208AA7404F064
-:101BC000753B01ED14600C14600B14600F2403705E
-:101BD0000B800980001208A704F080061208A77481
-:101BE00004F0EE4482FEEF4407F5828E83E5451251
-:101BF00008BE758382E531F002114C8E608F611250
-:101C00001EA5E4FFCEEDCEEED39561E56012076B25
-:101C1000403974202EF582E43403F583E07003FF2D
-:101C200080261208E2FDC39F401ECFEDCFEB4A7025
-:101C30000B8D421208EEF5418E40800C1208E2F541
-:101C4000381208EEF5398E3A1E80BC22755801E52F
-:101C500035700C1207CCE0F54A1207D8E0F54CE5D8
-:101C600035B4040C1207E4E0F54A1207F0E0F54C35
-:101C7000E535B401047F0180027F00E535B402043C
-:101C80007E0180027E00EE4F600C1207FCE0F54AF8
-:101C9000120808E0F54C85414985404B22755B01EF
-:101CA000900724120735E0541FFFD3940250048F8D
-:101CB000588005EF24FEF558EFC394184005755978
-:101CC000188004EF04F55985435AAF587E00AD598A
-:101CD0007C00AB5B7A00121541AF5A7E0012180AE5
-:101CE000AF5B7E00021AFFE5E230E70E121003C27E
-:101CF000303030031210FF203328E5E730E70512BB
-:101D00000EA2800DE5FEC394205006120EA243F9E8
-:101D100008E5F230E70353F97FE5F15470D39400FE
-:101D200050D822120E04758380E4F0E508440712AF
-:101D30000DFD758384120E02758386120E02758363
-:101D40008CE054F3120E0375838E120E0275839489
-:101D5000E054FBF02212072A75838EE412072974DF
-:101D600001120729E41208BE75838CE04420120892
-:101D7000BEE054DFF07484850882F583E0547FF080
-:101D8000E04480F022755601E4FDF557AF35FEFCC6
-:101D9000120915121C9D121E7A121C4CAF577E00A0
-:101DA000AD567C00120444AF567E000211EE75560B
-:101DB00001E4FDF557AF35FEFC120915121C9D120A
-:101DC0001E7A121C4CAF577E00AD567C00120444A4
-:101DD000AF567E000211EEE4F516120E44FEE50841
-:101DE0004405FF120E658F828E83F00516E516C33B
-:101DF000941440E6E508120E2BE4F022E4F558F5C1
-:101E000059F55AFFFEAD58FC1209157F047E00AD4E
-:101E1000587C001209157F027E00AD587C00020933
-:101E200015E53C253EFCE5422400FBE433FAECC317
-:101E30009BEA12076B400B8C42E53D253FF5418F35
-:101E4000402212090B227484F5188508198519821D
-:101E5000851883E0547FF0E04480F0E04480F02275
-:101E6000EF4E700B12072A7583D2E054DFF0221276
-:101E7000072A7583D2E04420F02275580190072686
-:101E8000120735E0543FF541120732E0543FF54068
-:101E900022755602E4F557121DFCAF577E00AD5671
-:101EA0007C00020444E4F542F541F540F538F5398B
-:101EB000F53A22EF5407FFE5F954F84FF5F9227F80
-:101EC00001E4FE0F0EBEFFFB2201200001042000F2
-:101ED0000000000000000000000000000000000002
-:101EE00000000000000000000000000000000000F2
-:101EF00000000000000000000000000000000000E2
-:101F000000000000000000000000000000000000D1
-:101F100000000000000000000000000000000000C1
-:101F200000000000000000000000000000000000B1
-:101F300000000000000000000000000000000000A1
-:101F40000000000000000000000000000000000091
-:101F50000000000000000000000000000000000081
-:101F60000000000000000000000000000000000071
-:101F70000000000000000000000000000000000061
-:101F80000000000000000000000000000000000051
-:101F90000000000000000000000000000000000041
-:101FA0000000000000000000000000000000000031
-:101FB0000000000000000000000000000000000021
-:101FC0000000000000000000000000000000000011
-:101FD0000000000000000000000000000000000001
-:101FE00000000000000000000000000000000000F1
-:101FF000000000000000000001201100042000810A
-:00000001FF
index aea605c98ba6b4eb920a022acaf6c4587ed653f9..aae187a7f94a661edb82189496242fdac4c33fcf 100644 (file)
@@ -551,6 +551,7 @@ struct block_device *bdgrab(struct block_device *bdev)
        ihold(bdev->bd_inode);
        return bdev;
 }
+EXPORT_SYMBOL(bdgrab);
 
 long nr_blockdev_pages(void)
 {
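
Exporting bdgrab() lets modular code pin an already-referenced block_device through its inode without opening the device again; the reference is later dropped with bdput(). A minimal sketch of that pairing, assuming a hypothetical work-queueing helper (queue_bdev_work() is invented for illustration):

	/*
	 * Hypothetical module code: bdgrab() takes an inode reference so the
	 * block_device stays valid while the queued work runs; the completion
	 * path is expected to call bdput(bdev) to drop it again.
	 */
	static void hold_bdev_for_work(struct block_device *bdev)
	{
		bdgrab(bdev);		/* reference only, does not open the device */
		queue_bdev_work(bdev);	/* hypothetical helper */
	}
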
index ecd25a1b4e519562ff874112515052403877fbc4..ca9d8f1a3bb67968dab36b8df98c0c2093a2300f 100644 (file)
@@ -651,6 +651,8 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
        if (tree_mod_dont_log(fs_info, NULL))
                return 0;
 
+       __tree_mod_log_free_eb(fs_info, old_root);
+
        ret = tree_mod_alloc(fs_info, flags, &tm);
        if (ret < 0)
                goto out;
@@ -736,7 +738,7 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
 static noinline void
 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
                     struct extent_buffer *src, unsigned long dst_offset,
-                    unsigned long src_offset, int nr_items)
+                    unsigned long src_offset, int nr_items, int log_removal)
 {
        int ret;
        int i;
@@ -750,10 +752,12 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
        }
 
        for (i = 0; i < nr_items; i++) {
-               ret = tree_mod_log_insert_key_locked(fs_info, src,
-                                                    i + src_offset,
-                                                    MOD_LOG_KEY_REMOVE);
-               BUG_ON(ret < 0);
+               if (log_removal) {
+                       ret = tree_mod_log_insert_key_locked(fs_info, src,
+                                                       i + src_offset,
+                                                       MOD_LOG_KEY_REMOVE);
+                       BUG_ON(ret < 0);
+               }
                ret = tree_mod_log_insert_key_locked(fs_info, dst,
                                                     i + dst_offset,
                                                     MOD_LOG_KEY_ADD);
@@ -927,7 +931,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                        ret = btrfs_dec_ref(trans, root, buf, 1, 1);
                        BUG_ON(ret); /* -ENOMEM */
                }
-               tree_mod_log_free_eb(root->fs_info, buf);
                clean_tree_block(trans, root, buf);
                *last_ref = 1;
        }
@@ -1046,6 +1049,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                btrfs_set_node_ptr_generation(parent, parent_slot,
                                              trans->transid);
                btrfs_mark_buffer_dirty(parent);
+               tree_mod_log_free_eb(root->fs_info, buf);
                btrfs_free_tree_block(trans, root, buf, parent_start,
                                      last_ref);
        }
@@ -1750,7 +1754,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                        goto enospc;
                }
 
-               tree_mod_log_free_eb(root->fs_info, root->node);
                tree_mod_log_set_root_pointer(root, child);
                rcu_assign_pointer(root->node, child);
 
@@ -2995,7 +2998,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
                push_items = min(src_nritems - 8, push_items);
 
        tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
-                            push_items);
+                            push_items, 1);
        copy_extent_buffer(dst, src,
                           btrfs_node_key_ptr_offset(dst_nritems),
                           btrfs_node_key_ptr_offset(0),
@@ -3066,7 +3069,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
                                      sizeof(struct btrfs_key_ptr));
 
        tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
-                            src_nritems - push_items, push_items);
+                            src_nritems - push_items, push_items, 1);
        copy_extent_buffer(dst, src,
                           btrfs_node_key_ptr_offset(0),
                           btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -3218,12 +3221,18 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
        int mid;
        int ret;
        u32 c_nritems;
+       int tree_mod_log_removal = 1;
 
        c = path->nodes[level];
        WARN_ON(btrfs_header_generation(c) != trans->transid);
        if (c == root->node) {
                /* trying to split the root, lets make a new one */
                ret = insert_new_root(trans, root, path, level + 1);
+               /*
+                * removal of root nodes has been logged by
+                * tree_mod_log_set_root_pointer due to locking
+                */
+               tree_mod_log_removal = 0;
                if (ret)
                        return ret;
        } else {
@@ -3261,7 +3270,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
                            (unsigned long)btrfs_header_chunk_tree_uuid(split),
                            BTRFS_UUID_SIZE);
 
-       tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
+       tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid,
+                            tree_mod_log_removal);
        copy_extent_buffer(split, c,
                           btrfs_node_key_ptr_offset(0),
                           btrfs_node_key_ptr_offset(mid),
index 7d84651e850b22cb937b29c68d34c1119af1e4a0..6d19a0a554aadc3aeadbe1df6eff03acd489375e 100644 (file)
@@ -1291,6 +1291,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                                      0, objectid, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
+               leaf = NULL;
                goto fail;
        }
 
@@ -1334,11 +1335,16 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 
        btrfs_tree_unlock(leaf);
 
+       return root;
+
 fail:
-       if (ret)
-               return ERR_PTR(ret);
+       if (leaf) {
+               btrfs_tree_unlock(leaf);
+               free_extent_buffer(leaf);
+       }
+       kfree(root);
 
-       return root;
+       return ERR_PTR(ret);
 }
 
 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
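
The reworked error path above follows a common kernel shape: the success return moves ahead of the fail: label, every pointer is kept either NULL or fully initialized so the label can clean up unconditionally, and the failure path always ends with ERR_PTR(ret). A schematic sketch of that shape, with invented names (struct thing, alloc_buf(), free_buf() are illustrative only):

	static struct thing *make_thing(void)
	{
		struct thing *t;
		struct buf *b = NULL;
		int ret;

		t = kzalloc(sizeof(*t), GFP_KERNEL);
		if (!t)
			return ERR_PTR(-ENOMEM);

		b = alloc_buf();		/* hypothetical allocation */
		if (IS_ERR(b)) {
			ret = PTR_ERR(b);
			b = NULL;		/* so the fail path can test it safely */
			goto fail;
		}

		t->buf = b;
		return t;			/* success leaves before the label */

	fail:
		if (b)
			free_buf(b);
		kfree(t);
		return ERR_PTR(ret);
	}
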
@@ -3253,7 +3259,7 @@ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);
 
-       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                btrfs_free_log(NULL, root);
                btrfs_free_log_root_tree(NULL, fs_info);
        }
index 9ac2eca681ebb09c605612dd563bc4417e4fc587..3d551231cabae9e43c7a1e90385f22fd25891239 100644 (file)
@@ -257,7 +257,8 @@ static int exclude_super_stripes(struct btrfs_root *root,
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
-               BUG_ON(ret); /* -ENOMEM */
+               if (ret)
+                       return ret;
        }
 
        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -265,13 +266,17 @@ static int exclude_super_stripes(struct btrfs_root *root,
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
-               BUG_ON(ret); /* -ENOMEM */
+               if (ret)
+                       return ret;
 
                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret) {
+                               kfree(logical);
+                               return ret;
+                       }
                }
 
                kfree(logical);
@@ -4438,7 +4443,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
        spin_lock(&sinfo->lock);
        spin_lock(&block_rsv->lock);
 
-       block_rsv->size = num_bytes;
+       block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
 
        num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
                    sinfo->bytes_reserved + sinfo->bytes_readonly +
@@ -4793,14 +4798,49 @@ out_fail:
         * If the inodes csum_bytes is the same as the original
         * csum_bytes then we know we haven't raced with any free()ers
         * so we can just reduce our inodes csum bytes and carry on.
-        * Otherwise we have to do the normal free thing to account for
-        * the case that the free side didn't free up its reserve
-        * because of this outstanding reservation.
         */
-       if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+       if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
                calc_csum_metadata_size(inode, num_bytes, 0);
-       else
-               to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+       } else {
+               u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
+               u64 bytes;
+
+               /*
+                * This is tricky, but first we need to figure out how much we
+                * free'd from any free-ers that occurred during this
+                * reservation, so we reset ->csum_bytes to the csum_bytes
+                * before we dropped our lock, and then call the free for the
+                * number of bytes that were freed while we were trying our
+                * reservation.
+                */
+               bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
+               BTRFS_I(inode)->csum_bytes = csum_bytes;
+               to_free = calc_csum_metadata_size(inode, bytes, 0);
+
+
+               /*
+                * Now we need to see how much we would have freed had we not
+                * been making this reservation and our ->csum_bytes were not
+                * artificially inflated.
+                */
+               BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
+               bytes = csum_bytes - orig_csum_bytes;
+               bytes = calc_csum_metadata_size(inode, bytes, 0);
+
+               /*
+                * Now reset ->csum_bytes to what it should be.  If bytes is
+                * more than to_free then we would have free'd more space had we
+                * not had an artificially high ->csum_bytes, so we need to free
+                * the remainder.  If bytes is the same or less then we don't
+                * need to do anything, the other free-ers did the correct
+                * thing.
+                */
+               BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
+               if (bytes > to_free)
+                       to_free = bytes - to_free;
+               else
+                       to_free = 0;
+       }
        spin_unlock(&BTRFS_I(inode)->lock);
        if (dropped)
                to_free += btrfs_calc_trans_metadata_size(root, dropped);
@@ -7947,7 +7987,17 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                 * info has super bytes accounted for, otherwise we'll think
                 * we have more space than we actually do.
                 */
-               exclude_super_stripes(root, cache);
+               ret = exclude_super_stripes(root, cache);
+               if (ret) {
+                       /*
+                        * We may have excluded something, so call this just in
+                        * case.
+                        */
+                       free_excluded_extents(root, cache);
+                       kfree(cache->free_space_ctl);
+                       kfree(cache);
+                       goto error;
+               }
 
                /*
                 * check for two cases, either we are full, and therefore
@@ -8089,7 +8139,17 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 
        cache->last_byte_to_unpin = (u64)-1;
        cache->cached = BTRFS_CACHE_FINISHED;
-       exclude_super_stripes(root, cache);
+       ret = exclude_super_stripes(root, cache);
+       if (ret) {
+               /*
+                * We may have excluded something, so call this just in
+                * case.
+                */
+               free_excluded_extents(root, cache);
+               kfree(cache->free_space_ctl);
+               kfree(cache);
+               return ret;
+       }
 
        add_new_free_space(cache, root->fs_info, chunk_offset,
                           chunk_offset + size);
index f173c5af64610de66597cce1e38ac493729d6125..cdee391fc7bfd57c596204a8474142e7afe60335 100644 (file)
@@ -1257,6 +1257,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
                                GFP_NOFS);
 }
 
+int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
+{
+       unsigned long index = start >> PAGE_CACHE_SHIFT;
+       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+       struct page *page;
+
+       while (index <= end_index) {
+               page = find_get_page(inode->i_mapping, index);
+               BUG_ON(!page); /* Pages should be in the extent_io_tree */
+               clear_page_dirty_for_io(page);
+               page_cache_release(page);
+               index++;
+       }
+       return 0;
+}
+
+int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
+{
+       unsigned long index = start >> PAGE_CACHE_SHIFT;
+       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+       struct page *page;
+
+       while (index <= end_index) {
+               page = find_get_page(inode->i_mapping, index);
+               BUG_ON(!page); /* Pages should be in the extent_io_tree */
+               account_page_redirty(page);
+               __set_page_dirty_nobuffers(page);
+               page_cache_release(page);
+               index++;
+       }
+       return 0;
+}
+
 /*
  * helper function to set both pages and extents in the tree writeback
  */
index 6068a1985560eed72a8df42fa528377c3584c93a..258c92156857f1d1e9e525a89d7a706d6c126c66 100644 (file)
@@ -325,6 +325,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
                      unsigned long *map_len);
 int extent_range_uptodate(struct extent_io_tree *tree,
                          u64 start, u64 end);
+int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
+int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode,
                                struct extent_io_tree *tree,
                                u64 start, u64 end, struct page *locked_page,
index ec160202be3e38057c26498558e2f309af7859b7..c4628a201cb30fe6e2daa09eff83bd1fec79ecf1 100644 (file)
@@ -118,9 +118,11 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
                csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
                csums_in_item /= csum_size;
 
-               if (csum_offset >= csums_in_item) {
+               if (csum_offset == csums_in_item) {
                        ret = -EFBIG;
                        goto fail;
+               } else if (csum_offset > csums_in_item) {
+                       goto fail;
                }
        }
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
@@ -728,7 +730,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        sector_sum = sums->sums;
-       trans->adding_csums = 1;
 again:
        next_offset = (u64)-1;
        found_next = 0;
@@ -899,7 +900,6 @@ next_sector:
                goto again;
        }
 out:
-       trans->adding_csums = 0;
        btrfs_free_path(path);
        return ret;
 
index 5b4ea5f55b8f47d0bf9a90992dd9da2d33880e14..ade03e6f7bd2706bbaac3406f1bcc31cf70cb578 100644 (file)
@@ -2142,6 +2142,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 {
        struct inode *inode = file_inode(file);
        struct extent_state *cached_state = NULL;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 cur_offset;
        u64 last_byte;
        u64 alloc_start;
@@ -2169,6 +2170,11 @@ static long btrfs_fallocate(struct file *file, int mode,
        ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
        if (ret)
                return ret;
+       if (root->fs_info->quota_enabled) {
+               ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
+               if (ret)
+                       goto out_reserve_fail;
+       }
 
        /*
         * wait for ordered IO before we have any locks.  We'll loop again
@@ -2272,6 +2278,9 @@ static long btrfs_fallocate(struct file *file, int mode,
                             &cached_state, GFP_NOFS);
 out:
        mutex_unlock(&inode->i_mutex);
+       if (root->fs_info->quota_enabled)
+               btrfs_qgroup_free(root, alloc_end - alloc_start);
+out_reserve_fail:
        /* Let go of our reservation. */
        btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
        return ret;
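
The quota handling follows the usual reserve-then-undo layout: btrfs_qgroup_reserve() is charged before the allocation work, btrfs_qgroup_free() undoes it on every path past the reservation, and the failure before the reservation gets its own out_reserve_fail: label so it skips the free. A schematic sketch of that control flow (do_prealloc_work() is invented; the surrounding calls mirror the hunks above):

	static int prealloc_with_quota(struct btrfs_root *root, struct inode *inode,
				       u64 start, u64 len)
	{
		int ret;

		ret = btrfs_check_data_free_space(inode, len);
		if (ret)
			return ret;			/* nothing reserved yet */

		if (root->fs_info->quota_enabled) {
			ret = btrfs_qgroup_reserve(root, len);
			if (ret)
				goto out_reserve_fail;
		}

		ret = do_prealloc_work(inode, start, len);	/* hypothetical */

		if (root->fs_info->quota_enabled)
			btrfs_qgroup_free(root, len);
	out_reserve_fail:
		btrfs_free_reserved_data_space(inode, len);
		return ret;
	}
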
index ca1b767d51f760672f8de72c48efbf3df8e58a75..09c58a35b429d6a82dc261014df2875e7c15f920 100644 (file)
@@ -353,6 +353,7 @@ static noinline int compress_file_range(struct inode *inode,
        int i;
        int will_compress;
        int compress_type = root->fs_info->compress_type;
+       int redirty = 0;
 
        /* if this is a small write inside eof, kick off a defrag */
        if ((end - start + 1) < 16 * 1024 &&
@@ -415,6 +416,17 @@ again:
                if (BTRFS_I(inode)->force_compress)
                        compress_type = BTRFS_I(inode)->force_compress;
 
+               /*
+                * we need to call clear_page_dirty_for_io on each
+                * page in the range.  Otherwise applications with the file
+                * mmap'd can wander in and change the page contents while
+                * we are compressing them.
+                *
+                * If the compression fails for any reason, we set the pages
+                * dirty again later on.
+                */
+               extent_range_clear_dirty_for_io(inode, start, end);
+               redirty = 1;
                ret = btrfs_compress_pages(compress_type,
                                           inode->i_mapping, start,
                                           total_compressed, pages,
@@ -554,6 +566,8 @@ cleanup_and_bail_uncompressed:
                        __set_page_dirty_nobuffers(locked_page);
                        /* unlocked later on in the async handlers */
                }
+               if (redirty)
+                       extent_range_redirty_for_io(inode, start, end);
                add_async_extent(async_cow, start, end - start + 1,
                                 0, NULL, 0, BTRFS_COMPRESS_NONE);
                *num_added += 1;
@@ -1743,8 +1757,10 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
        struct btrfs_ordered_sum *sum;
 
        list_for_each_entry(sum, list, list) {
+               trans->adding_csums = 1;
                btrfs_csum_file_blocks(trans,
                       BTRFS_I(inode)->root->fs_info->csum_root, sum);
+               trans->adding_csums = 0;
        }
        return 0;
 }
@@ -3679,11 +3695,9 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
         * 1 for the dir item
         * 1 for the dir index
         * 1 for the inode ref
-        * 1 for the inode ref in the tree log
-        * 2 for the dir entries in the log
         * 1 for the inode
         */
-       trans = btrfs_start_transaction(root, 8);
+       trans = btrfs_start_transaction(root, 5);
        if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
                return trans;
 
@@ -8127,7 +8141,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
         * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
         * should cover the worst case number of items we'll modify.
         */
-       trans = btrfs_start_transaction(root, 20);
+       trans = btrfs_start_transaction(root, 11);
        if (IS_ERR(trans)) {
                 ret = PTR_ERR(trans);
                 goto out_notrans;
index dc08d77b717ea47f0eb7d43e351153f556c75eaa..005c45db699eecc0fef93fca0832b91633dd5d8a 100644 (file)
@@ -557,6 +557,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
        INIT_LIST_HEAD(&splice);
        INIT_LIST_HEAD(&works);
 
+       mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);
        list_splice_init(&root->fs_info->ordered_extents, &splice);
        while (!list_empty(&splice)) {
@@ -600,6 +601,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
 
                cond_resched();
        }
+       mutex_unlock(&root->fs_info->ordered_operations_mutex);
 }
 
 /*
index 5471e47d6559eafeb36e55035cb427710c5ee4ba..b44124dd2370ea726e66a099400163c342d8fa12 100644 (file)
@@ -1153,7 +1153,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
        ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
                                   sgn > 0 ? node->seq - 1 : node->seq, &roots);
        if (ret < 0)
-               goto out;
+               return ret;
 
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
@@ -1275,7 +1275,6 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
        ret = 0;
 unlock:
        spin_unlock(&fs_info->qgroup_lock);
-out:
        ulist_free(roots);
        ulist_free(tmp);
 
index 53c3501fa4ca348f13cb17617b363643a59b1f6f..85e072b956d564d64c527c1b58d3b5b26019c34f 100644 (file)
@@ -542,7 +542,6 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size_nr(eb, path->slots[0]);
-       btrfs_release_path(path);
 
        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
@@ -558,7 +557,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
                                ret < 0 ? -1 : ref_level,
                                ret < 0 ? -1 : ref_root);
                } while (ret != 1);
+               btrfs_release_path(path);
        } else {
+               btrfs_release_path(path);
                swarn.path = path;
                swarn.dev = dev;
                iterate_extent_inodes(fs_info, found_key.objectid,
index f7a8b861058b5234094d24e9fc35a49d9e78fe1c..c85e7c6b4598af950d240582f648224570889ca4 100644 (file)
@@ -3945,12 +3945,10 @@ static int is_extent_unchanged(struct send_ctx *sctx,
                    found_key.type != key.type) {
                        key.offset += right_len;
                        break;
-               } else {
-                       if (found_key.offset != key.offset + right_len) {
-                               /* Should really not happen */
-                               ret = -EIO;
-                               goto out;
-                       }
+               }
+               if (found_key.offset != key.offset + right_len) {
+                       ret = 0;
+                       goto out;
                }
                key = found_key;
        }
index 5989a92236f7f1b578ec3ccf88078bb69c1093d3..2854c824ab6443d04eac19cf0b540f79c257cfa0 100644 (file)
@@ -4935,7 +4935,18 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        em = lookup_extent_mapping(em_tree, chunk_start, 1);
        read_unlock(&em_tree->lock);
 
-       BUG_ON(!em || em->start != chunk_start);
+       if (!em) {
+               printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
+                      chunk_start);
+               return -EIO;
+       }
+
+       if (em->start != chunk_start) {
+               printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
+                      em->start, chunk_start);
+               free_extent_map(em);
+               return -EIO;
+       }
        map = (struct map_lookup *)em->bdev;
 
        length = em->len;
index fbfae008ba44e3e8223f42d4823cfeae2a673ac9..e8bc3420d63edcc28f0992217c860082fe424261 100644 (file)
@@ -2542,7 +2542,6 @@ static int prepend_path(const struct path *path,
        bool slash = false;
        int error = 0;
 
-       br_read_lock(&vfsmount_lock);
        while (dentry != root->dentry || vfsmnt != root->mnt) {
                struct dentry * parent;
 
@@ -2572,8 +2571,6 @@ static int prepend_path(const struct path *path,
        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);
 
-out:
-       br_read_unlock(&vfsmount_lock);
        return error;
 
 global_root:
@@ -2590,7 +2587,7 @@ global_root:
                error = prepend(buffer, buflen, "/", 1);
        if (!error)
                error = is_mounted(vfsmnt) ? 1 : 2;
-       goto out;
+       return error;
 }
 
 /**
@@ -2617,9 +2614,11 @@ char *__d_path(const struct path *path,
        int error;
 
        prepend(&res, &buflen, "\0", 1);
+       br_read_lock(&vfsmount_lock);
        write_seqlock(&rename_lock);
        error = prepend_path(path, root, &res, &buflen);
        write_sequnlock(&rename_lock);
+       br_read_unlock(&vfsmount_lock);
 
        if (error < 0)
                return ERR_PTR(error);
@@ -2636,9 +2635,11 @@ char *d_absolute_path(const struct path *path,
        int error;
 
        prepend(&res, &buflen, "\0", 1);
+       br_read_lock(&vfsmount_lock);
        write_seqlock(&rename_lock);
        error = prepend_path(path, &root, &res, &buflen);
        write_sequnlock(&rename_lock);
+       br_read_unlock(&vfsmount_lock);
 
        if (error > 1)
                error = -EINVAL;
@@ -2702,11 +2703,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
                return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
 
        get_fs_root(current->fs, &root);
+       br_read_lock(&vfsmount_lock);
        write_seqlock(&rename_lock);
        error = path_with_deleted(path, &root, &res, &buflen);
+       write_sequnlock(&rename_lock);
+       br_read_unlock(&vfsmount_lock);
        if (error < 0)
                res = ERR_PTR(error);
-       write_sequnlock(&rename_lock);
        path_put(&root);
        return res;
 }
@@ -2830,6 +2833,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
        get_fs_root_and_pwd(current->fs, &root, &pwd);
 
        error = -ENOENT;
+       br_read_lock(&vfsmount_lock);
        write_seqlock(&rename_lock);
        if (!d_unlinked(pwd.dentry)) {
                unsigned long len;
@@ -2839,6 +2843,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
                prepend(&cwd, &buflen, "\0", 1);
                error = prepend_path(&pwd, &root, &cwd, &buflen);
                write_sequnlock(&rename_lock);
+               br_read_unlock(&vfsmount_lock);
 
                if (error < 0)
                        goto out;
@@ -2859,6 +2864,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
                }
        } else {
                write_sequnlock(&rename_lock);
+               br_read_unlock(&vfsmount_lock);
        }
 
 out:
index 56efcaadf8485a5887ae04493b9cacf28b5c73ef..9c6d06dcef8bf1f997c0f05bfe443fcde993d72b 100644 (file)
@@ -2999,20 +2999,23 @@ static int ext4_split_extent_at(handle_t *handle,
                        if (split_flag & EXT4_EXT_DATA_VALID1) {
                                err = ext4_ext_zeroout(inode, ex2);
                                zero_ex.ee_block = ex2->ee_block;
-                               zero_ex.ee_len = ext4_ext_get_actual_len(ex2);
+                               zero_ex.ee_len = cpu_to_le16(
+                                               ext4_ext_get_actual_len(ex2));
                                ext4_ext_store_pblock(&zero_ex,
                                                      ext4_ext_pblock(ex2));
                        } else {
                                err = ext4_ext_zeroout(inode, ex);
                                zero_ex.ee_block = ex->ee_block;
-                               zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+                               zero_ex.ee_len = cpu_to_le16(
+                                               ext4_ext_get_actual_len(ex));
                                ext4_ext_store_pblock(&zero_ex,
                                                      ext4_ext_pblock(ex));
                        }
                } else {
                        err = ext4_ext_zeroout(inode, &orig_ex);
                        zero_ex.ee_block = orig_ex.ee_block;
-                       zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex);
+                       zero_ex.ee_len = cpu_to_le16(
+                                               ext4_ext_get_actual_len(&orig_ex));
                        ext4_ext_store_pblock(&zero_ex,
                                              ext4_ext_pblock(&orig_ex));
                }
@@ -3272,7 +3275,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                if (err)
                        goto out;
                zero_ex.ee_block = ex->ee_block;
-               zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+               zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
                ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
 
                err = ext4_ext_get_access(handle, inode, path + depth);
index b505a145a5937a426184d5c289b6825ee62d0e17..a04183127ef049190ad96c0b7052ea4495fd33e9 100644 (file)
@@ -1539,9 +1539,9 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
                blk = *i_data;
                if (level > 0) {
                        ext4_lblk_t first2;
-                       bh = sb_bread(inode->i_sb, blk);
+                       bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
                        if (!bh) {
-                               EXT4_ERROR_INODE_BLOCK(inode, blk,
+                               EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
                                                       "Read failure");
                                return -EIO;
                        }
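
The ext4 hunks above fix the same class of bug: ee_len and indirect block numbers are little-endian on-disk fields (__le16/__le32), so CPU-order values must go through cpu_to_le16()/cpu_to_le32() when stored and le16_to_cpu()/le32_to_cpu() when read, even though the mistake is invisible on little-endian hosts. A minimal sketch of the convention, using an illustrative struct rather than the real ext4_extent layout:

	/* Illustrative only; sparse flags any direct assignment to __le fields. */
	struct demo_extent {
		__le32	ee_block;	/* stored little-endian on disk */
		__le16	ee_len;
	};

	static void demo_set(struct demo_extent *ex, u32 block, u16 len)
	{
		ex->ee_block = cpu_to_le32(block);	/* CPU order -> disk order */
		ex->ee_len   = cpu_to_le16(len);
	}

	static u16 demo_get_len(const struct demo_extent *ex)
	{
		return le16_to_cpu(ex->ee_len);		/* disk order -> CPU order */
	}
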
index 507141fceb99669f7554e330d81605d2fff248b2..4be78237d896d759229eaf300f583288e1a28086 100644 (file)
@@ -125,3 +125,8 @@ extern int invalidate_inodes(struct super_block *, bool);
  * dcache.c
  */
 extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
+
+/*
+ * read_write.c
+ */
+extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
index 50ca17d3cb4506de87465bb4d62f3da5f00553a5..d581e45c0a9fd6ada94edd2bb6ea4fe4c7fc9e23 100644 (file)
@@ -798,6 +798,10 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
        }
 
        mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+       /* Don't allow unprivileged users to change mount flags */
+       if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
+               mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+
        atomic_inc(&sb->s_active);
        mnt->mnt.mnt_sb = sb;
        mnt->mnt.mnt_root = dget(root);
@@ -1713,6 +1717,9 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
        if (readonly_request == __mnt_is_readonly(mnt))
                return 0;
 
+       if (mnt->mnt_flags & MNT_LOCK_READONLY)
+               return -EPERM;
+
        if (readonly_request)
                error = mnt_make_readonly(real_mount(mnt));
        else
@@ -2339,7 +2346,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
        /* First pass: copy the tree topology */
        copy_flags = CL_COPY_ALL | CL_EXPIRE;
        if (user_ns != mnt_ns->user_ns)
-               copy_flags |= CL_SHARED_TO_SLAVE;
+               copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
        new = copy_tree(old, old->mnt.mnt_root, copy_flags);
        if (IS_ERR(new)) {
                up_write(&namespace_sem);
@@ -2732,6 +2739,51 @@ bool our_mnt(struct vfsmount *mnt)
        return check_mnt(real_mount(mnt));
 }
 
+bool current_chrooted(void)
+{
+       /* Does the current process have a non-standard root */
+       struct path ns_root;
+       struct path fs_root;
+       bool chrooted;
+
+       /* Find the namespace root */
+       ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
+       ns_root.dentry = ns_root.mnt->mnt_root;
+       path_get(&ns_root);
+       while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
+               ;
+
+       get_fs_root(current->fs, &fs_root);
+
+       chrooted = !path_equal(&fs_root, &ns_root);
+
+       path_put(&fs_root);
+       path_put(&ns_root);
+
+       return chrooted;
+}
+
+void update_mnt_policy(struct user_namespace *userns)
+{
+       struct mnt_namespace *ns = current->nsproxy->mnt_ns;
+       struct mount *mnt;
+
+       down_read(&namespace_sem);
+       list_for_each_entry(mnt, &ns->list, mnt_list) {
+               switch (mnt->mnt.mnt_sb->s_magic) {
+               case SYSFS_MAGIC:
+                       userns->may_mount_sysfs = true;
+                       break;
+               case PROC_SUPER_MAGIC:
+                       userns->may_mount_proc = true;
+                       break;
+               }
+               if (userns->may_mount_sysfs && userns->may_mount_proc)
+                       break;
+       }
+       up_read(&namespace_sem);
+}
+
 static void *mntns_get(struct task_struct *task)
 {
        struct mnt_namespace *ns = NULL;
index 737d839bc17b5aa0ae58e5350a235af1f8adfb2b..6fc7b5cae92bf6526bee07696f12977322f2cb5e 100644 (file)
@@ -55,7 +55,8 @@ static void dev_remove(struct net *net, dev_t dev)
 
        bl_pipe_msg.bl_wq = &nn->bl_wq;
        memset(msg, 0, sizeof(*msg));
-       msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS);
+       msg->len = sizeof(bl_msg) + bl_msg.totallen;
+       msg->data = kzalloc(msg->len, GFP_NOFS);
        if (!msg->data)
                goto out;
 
@@ -66,7 +67,6 @@ static void dev_remove(struct net *net, dev_t dev)
        memcpy(msg->data, &bl_msg, sizeof(bl_msg));
        dataptr = (uint8_t *) msg->data;
        memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request));
-       msg->len = sizeof(bl_msg) + bl_msg.totallen;
 
        add_wait_queue(&nn->bl_wq, &wq);
        if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) {
index dc0f98dfa71773bd467a1b6891b2f916b696b734..c516da5873fd12df0d3c9877b2c3189423c0dec4 100644 (file)
@@ -726,9 +726,9 @@ out1:
        return ret;
 }
 
-static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data)
+static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data, size_t datalen)
 {
-       return key_instantiate_and_link(key, data, strlen(data) + 1,
+       return key_instantiate_and_link(key, data, datalen,
                                        id_resolver_cache->thread_keyring,
                                        authkey);
 }
@@ -738,6 +738,7 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
                struct key *key, struct key *authkey)
 {
        char id_str[NFS_UINT_MAXLEN];
+       size_t len;
        int ret = -ENOKEY;
 
        /* ret = -ENOKEY */
@@ -747,13 +748,15 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
        case IDMAP_CONV_NAMETOID:
                if (strcmp(upcall->im_name, im->im_name) != 0)
                        break;
-               sprintf(id_str, "%d", im->im_id);
-               ret = nfs_idmap_instantiate(key, authkey, id_str);
+               /* Note: here we store the NUL terminator too */
+               len = sprintf(id_str, "%d", im->im_id) + 1;
+               ret = nfs_idmap_instantiate(key, authkey, id_str, len);
                break;
        case IDMAP_CONV_IDTONAME:
                if (upcall->im_id != im->im_id)
                        break;
-               ret = nfs_idmap_instantiate(key, authkey, im->im_name);
+               len = strlen(im->im_name);
+               ret = nfs_idmap_instantiate(key, authkey, im->im_name, len);
                break;
        default:
                ret = -EINVAL;
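
The explicit length matters because sprintf() returns the number of characters written, not counting the terminating NUL; the numeric-id case adds one so the terminator stays in the cached key payload, while the name case passes strlen() without it. A small standalone illustration (not the NFS code itself):

	/* e.g. id 1000 -> buffer holds "1000\0" and the function returns 5 */
	static size_t encode_id(char *buf, unsigned int id)
	{
		return sprintf(buf, "%u", id) + 1;
	}
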
index 49eeb044c109c0bb3c3878db7b8b60a780b50f97..4fb234d3aefb240f3d067523bda6df467336d2d0 100644 (file)
@@ -129,7 +129,6 @@ static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
 {
        if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
                return;
-       clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
        pnfs_return_layout(inode);
 }
 
index b2671cb0f901b8e904ee4b91961d2029e12fb2dc..26431cf62ddbc393fd5fe1e432742be37d06e12e 100644 (file)
@@ -2632,7 +2632,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
        int status;
 
        if (pnfs_ld_layoutret_on_setattr(inode))
-               pnfs_return_layout(inode);
+               pnfs_commit_and_return_layout(inode);
 
        nfs_fattr_init(fattr);
        
@@ -6416,22 +6416,8 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
 static void nfs4_layoutcommit_release(void *calldata)
 {
        struct nfs4_layoutcommit_data *data = calldata;
-       struct pnfs_layout_segment *lseg, *tmp;
-       unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
 
        pnfs_cleanup_layoutcommit(data);
-       /* Matched by references in pnfs_set_layoutcommit */
-       list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
-               list_del_init(&lseg->pls_lc_list);
-               if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
-                                      &lseg->pls_flags))
-                       pnfs_put_lseg(lseg);
-       }
-
-       clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
-       smp_mb__after_clear_bit();
-       wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
-
        put_rpccred(data->cred);
        kfree(data);
 }
index 48ac5aad62589cd7140f55a52d5ee63f31e743b5..4bdffe0ba025228803b65d4fe54ab5cb0adda0ed 100644 (file)
@@ -417,6 +417,16 @@ should_free_lseg(struct pnfs_layout_range *lseg_range,
               lo_seg_intersecting(lseg_range, recall_range);
 }
 
+static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
+               struct list_head *tmp_list)
+{
+       if (!atomic_dec_and_test(&lseg->pls_refcount))
+               return false;
+       pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
+       list_add(&lseg->pls_list, tmp_list);
+       return true;
+}
+
 /* Returns 1 if lseg is removed from list, 0 otherwise */
 static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                             struct list_head *tmp_list)
@@ -430,11 +440,8 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                 */
                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
                        atomic_read(&lseg->pls_refcount));
-               if (atomic_dec_and_test(&lseg->pls_refcount)) {
-                       pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
-                       list_add(&lseg->pls_list, tmp_list);
+               if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
                        rv = 1;
-               }
        }
        return rv;
 }
@@ -777,6 +784,21 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        return lseg;
 }
 
+static void pnfs_clear_layoutcommit(struct inode *inode,
+               struct list_head *head)
+{
+       struct nfs_inode *nfsi = NFS_I(inode);
+       struct pnfs_layout_segment *lseg, *tmp;
+
+       if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
+               return;
+       list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
+               if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+                       continue;
+               pnfs_lseg_dec_and_remove_zero(lseg, head);
+       }
+}
+
 /*
  * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
  * when the layout segment list is empty.
@@ -808,6 +830,7 @@ _pnfs_return_layout(struct inode *ino)
        /* Reference matched in nfs4_layoutreturn_release */
        pnfs_get_layout_hdr(lo);
        empty = list_empty(&lo->plh_segs);
+       pnfs_clear_layoutcommit(ino, &tmp_list);
        pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (empty) {
@@ -820,8 +843,6 @@ _pnfs_return_layout(struct inode *ino)
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
 
-       WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));
-
        lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
@@ -845,6 +866,33 @@ out:
 }
 EXPORT_SYMBOL_GPL(_pnfs_return_layout);
 
+int
+pnfs_commit_and_return_layout(struct inode *inode)
+{
+       struct pnfs_layout_hdr *lo;
+       int ret;
+
+       spin_lock(&inode->i_lock);
+       lo = NFS_I(inode)->layout;
+       if (lo == NULL) {
+               spin_unlock(&inode->i_lock);
+               return 0;
+       }
+       pnfs_get_layout_hdr(lo);
+       /* Block new layoutgets and read/write to ds */
+       lo->plh_block_lgets++;
+       spin_unlock(&inode->i_lock);
+       filemap_fdatawait(inode->i_mapping);
+       ret = pnfs_layoutcommit_inode(inode, true);
+       if (ret == 0)
+               ret = _pnfs_return_layout(inode);
+       spin_lock(&inode->i_lock);
+       lo->plh_block_lgets--;
+       spin_unlock(&inode->i_lock);
+       pnfs_put_layout_hdr(lo);
+       return ret;
+}
+
 bool pnfs_roc(struct inode *ino)
 {
        struct pnfs_layout_hdr *lo;
@@ -1458,7 +1506,6 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
        dprintk("pnfs write error = %d\n", hdr->pnfs_error);
        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
            PNFS_LAYOUTRET_ON_ERROR) {
-               clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
                pnfs_return_layout(hdr->inode);
        }
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
@@ -1613,7 +1660,6 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
        dprintk("pnfs read error = %d\n", hdr->pnfs_error);
        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
            PNFS_LAYOUTRET_ON_ERROR) {
-               clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
                pnfs_return_layout(hdr->inode);
        }
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
@@ -1746,11 +1792,27 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
 
        list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
                if (lseg->pls_range.iomode == IOMODE_RW &&
-                   test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+                   test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                        list_add(&lseg->pls_lc_list, listp);
        }
 }
 
+static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
+{
+       struct pnfs_layout_segment *lseg, *tmp;
+       unsigned long *bitlock = &NFS_I(inode)->flags;
+
+       /* Matched by references in pnfs_set_layoutcommit */
+       list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
+               list_del_init(&lseg->pls_lc_list);
+               pnfs_put_lseg(lseg);
+       }
+
+       clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
+       smp_mb__after_clear_bit();
+       wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
+}
+
 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
 {
        pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
@@ -1795,6 +1857,7 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
 
        if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
                nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
+       pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
 }
 
 /*
index 94ba804177483d3c78694ae1708e95a2d34048d5..f5f8a470a647c7dc2f3a475e3e852d1aa7c97f5e 100644 (file)
@@ -219,6 +219,7 @@ void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
 int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
 int _pnfs_return_layout(struct inode *);
+int pnfs_commit_and_return_layout(struct inode *);
 void pnfs_ld_write_done(struct nfs_write_data *);
 void pnfs_ld_read_done(struct nfs_read_data *);
 struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
@@ -407,6 +408,11 @@ static inline int pnfs_return_layout(struct inode *ino)
        return 0;
 }
 
+static inline int pnfs_commit_and_return_layout(struct inode *inode)
+{
+       return 0;
+}
+
 static inline bool
 pnfs_ld_layoutret_on_setattr(struct inode *inode)
 {
index 01168865dd37395a047cbeeb0175d7c8a5b80f88..a2720071f282f7607f13d63af8d6e9d6cf18bce6 100644 (file)
@@ -264,7 +264,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                iattr->ia_valid |= ATTR_SIZE;
        }
        if (bmval[0] & FATTR4_WORD0_ACL) {
-               int nace;
+               u32 nace;
                struct nfs4_ace *ace;
 
                READ_BUF(4); len += 4;
index 62c1ee128aebafbd82e70863e57caa74de804e13..ca05f6dc3544b4c216eb369d9e01fe7a48f0e90a 100644 (file)
@@ -102,7 +102,8 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 {
        if (rp->c_type == RC_REPLBUFF)
                kfree(rp->c_replvec.iov_base);
-       hlist_del(&rp->c_hash);
+       if (!hlist_unhashed(&rp->c_hash))
+               hlist_del(&rp->c_hash);
        list_del(&rp->c_lru);
        --num_drc_entries;
        kmem_cache_free(drc_slab, rp);
@@ -118,6 +119,10 @@ nfsd_reply_cache_free(struct svc_cacherep *rp)
 
 int nfsd_reply_cache_init(void)
 {
+       INIT_LIST_HEAD(&lru_head);
+       max_drc_entries = nfsd_cache_size_limit();
+       num_drc_entries = 0;
+
        register_shrinker(&nfsd_reply_cache_shrinker);
        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
                                        0, 0, NULL);
@@ -128,10 +133,6 @@ int nfsd_reply_cache_init(void)
        if (!cache_hash)
                goto out_nomem;
 
-       INIT_LIST_HEAD(&lru_head);
-       max_drc_entries = nfsd_cache_size_limit();
-       num_drc_entries = 0;
-
        return 0;
 out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
index 2a7eb536de0bec80dfbfd7d981139f2e925e644e..2b2e2396a86913b4d4e69c376852ded82dd9be3a 100644 (file)
@@ -1013,6 +1013,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
        int                     host_err;
        int                     stable = *stablep;
        int                     use_wgather;
+       loff_t                  pos = offset;
 
        dentry = file->f_path.dentry;
        inode = dentry->d_inode;
@@ -1025,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 
        /* Write the data. */
        oldfs = get_fs(); set_fs(KERNEL_DS);
-       host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
+       host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
        set_fs(oldfs);
        if (host_err < 0)
                goto out_nfserr;
index 3e000a51ac0d09556d184d26422a91d0bc4c0ff9..8b29d2164da6aef6edb88b9872368c849e658530 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mnt_namespace.h>
 #include <linux/mount.h>
 #include <linux/fs.h>
+#include <linux/nsproxy.h>
 #include "internal.h"
 #include "pnode.h"
 
@@ -220,6 +221,7 @@ static struct mount *get_source(struct mount *dest,
 int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
                    struct mount *source_mnt, struct list_head *tree_list)
 {
+       struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
        struct mount *m, *child;
        int ret = 0;
        struct mount *prev_dest_mnt = dest_mnt;
@@ -237,6 +239,10 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
 
                source =  get_source(m, prev_dest_mnt, prev_src_mnt, &type);
 
+               /* Notice when we are propagating across user namespaces */
+               if (m->mnt_ns->user_ns != user_ns)
+                       type |= CL_UNPRIVILEGED;
+
                child = copy_tree(source, source->mnt.mnt_root, type);
                if (IS_ERR(child)) {
                        ret = PTR_ERR(child);
index 19b853a3445cb907665b4403484984a0c525af68..a0493d5ebfbf52be2eb07a794df459ab2a32cd6a 100644 (file)
@@ -23,6 +23,7 @@
 #define CL_MAKE_SHARED                 0x08
 #define CL_PRIVATE             0x10
 #define CL_SHARED_TO_SLAVE     0x20
+#define CL_UNPRIVILEGED                0x40
 
 static inline void set_mnt_shared(struct mount *mnt)
 {
index c6e9fac26bace4e9b63bd57dce624589dc67dfd7..9c7fab1d23f0d17d68446bf3d46ce1a906ae0907 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
+#include <linux/user_namespace.h>
 #include <linux/mount.h>
 #include <linux/pid_namespace.h>
 #include <linux/parser.h>
@@ -108,6 +109,9 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
        } else {
                ns = task_active_pid_ns(current);
                options = data;
+
+               if (!current_user_ns()->may_mount_proc)
+                       return ERR_PTR(-EPERM);
        }
 
        sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns);
index a698eff457fb6e510c0543c9b762cc8c8d702f23..e6ddc8dceb96fc48a8fbe379799d0efed52d2b4a 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/splice.h>
 #include <linux/compat.h>
 #include "read_write.h"
+#include "internal.h"
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -417,6 +418,33 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
 
 EXPORT_SYMBOL(do_sync_write);
 
+ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
+{
+       mm_segment_t old_fs;
+       const char __user *p;
+       ssize_t ret;
+
+       if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+               return -EINVAL;
+
+       old_fs = get_fs();
+       set_fs(get_ds());
+       p = (__force const char __user *)buf;
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
+       if (file->f_op->write)
+               ret = file->f_op->write(file, p, count, pos);
+       else
+               ret = do_sync_write(file, p, count, pos);
+       set_fs(old_fs);
+       if (ret > 0) {
+               fsnotify_modify(file);
+               add_wchar(current, ret);
+       }
+       inc_syscw(current);
+       return ret;
+}
+
 ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
 {
        ssize_t ret;
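
The new __kernel_write() helper (declared in fs/internal.h) is the in-kernel sibling of vfs_write(): the buffer is a kernel pointer and the position is passed by reference, so a caller can write at an explicit offset without touching file->f_pos; the write_pipe_buf() conversion further down uses it exactly this way. A minimal illustrative sketch, assuming the caller sits inside core VFS code where internal.h is visible:

    #include <linux/fs.h>
    #include "internal.h"           /* declares __kernel_write() */

    /* Illustrative only: write a kernel buffer at an explicit offset. */
    static ssize_t example_write_at(struct file *filp, const char *kbuf,
                                    size_t len, loff_t off)
    {
            loff_t pos = off;

            return __kernel_write(filp, kbuf, len, &pos);
    }
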
index c196369fe4083128163ed66707d85cde1c0367fb..4cce1d9552fbbcd5f23447245a8b6f942c6fe89c 100644 (file)
@@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,
        if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
                return -ENOSPC;
 
-       if (name[0] == '.' && (name[1] == '\0' ||
-                              (name[1] == '.' && name[2] == '\0')))
+       if (name[0] == '.' && (namelen < 2 ||
+                              (namelen == 2 && name[1] == '.')))
                return 0;
 
        dentry = lookup_one_len(name, dbuf->xadir, namelen);
index 718bd0056384688af6ead056156574638c0be6e9..29e394e49ddda7c7721d3939d993632a29f4499a 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/security.h>
 #include <linux/gfp.h>
 #include <linux/socket.h>
+#include "internal.h"
 
 /*
  * Attempt to steal a page from a pipe buffer. This should perhaps go into
@@ -1048,9 +1049,10 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 {
        int ret;
        void *data;
+       loff_t tmp = sd->pos;
 
        data = buf->ops->map(pipe, buf, 0);
-       ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
+       ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
        buf->ops->unmap(pipe, buf, data);
 
        return ret;
index 2fbdff6be25ce3d546e17fa4e72783e9d3f03637..e14512678c9b7b8042f5ba55a329a0101849f606 100644 (file)
@@ -1020,6 +1020,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
                ino = parent_sd->s_ino;
                if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
                        filp->f_pos++;
+               else
+                       return 0;
        }
        if (filp->f_pos == 1) {
                if (parent_sd->s_parent)
@@ -1028,6 +1030,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
                        ino = parent_sd->s_ino;
                if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
                        filp->f_pos++;
+               else
+                       return 0;
        }
        mutex_lock(&sysfs_mutex);
        for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
@@ -1058,10 +1062,21 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
        return 0;
 }
 
+static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+       struct inode *inode = file_inode(file);
+       loff_t ret;
+
+       mutex_lock(&inode->i_mutex);
+       ret = generic_file_llseek(file, offset, whence);
+       mutex_unlock(&inode->i_mutex);
+
+       return ret;
+}
 
 const struct file_operations sysfs_dir_operations = {
        .read           = generic_read_dir,
        .readdir        = sysfs_readdir,
        .release        = sysfs_dir_release,
-       .llseek         = generic_file_llseek,
+       .llseek         = sysfs_dir_llseek,
 };
index 8d924b5ec733450e37599c8338ab69ea3b15027f..afd83273e6cea8112e1570c8835bfa829303b2f8 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
+#include <linux/user_namespace.h>
 
 #include "sysfs.h"
 
@@ -111,6 +112,9 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
        struct super_block *sb;
        int error;
 
+       if (!(flags & MS_KERNMOUNT) && !current_user_ns()->may_mount_sysfs)
+               return ERR_PTR(-EPERM);
+
        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);
index ac838b844936cdc29be5f3eff7192e9cb9a15582..f21acf0ef01f9535b7d3a576361c387593414747 100644 (file)
@@ -1568,6 +1568,12 @@ static int ubifs_remount_rw(struct ubifs_info *c)
        c->remounting_rw = 1;
        c->ro_mount = 0;
 
+       if (c->space_fixup) {
+               err = ubifs_fixup_free_space(c);
+               if (err)
+                       return err;
+       }
+
        err = check_free_space(c);
        if (err)
                goto out;
@@ -1684,12 +1690,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)
                err = dbg_check_space_info(c);
        }
 
-       if (c->space_fixup) {
-               err = ubifs_fixup_free_space(c);
-               if (err)
-                       goto out;
-       }
-
        mutex_unlock(&c->umount_mutex);
        return err;
 
index e3e0d651c6cab54b51745a242a844b142a64bc71..8c7846bd74f4c293551ac09b387363cf51e7d705 100644 (file)
@@ -310,7 +310,7 @@ struct drm_plane;
  * drm_crtc_funcs - control CRTCs for a given device
  * @save: save CRTC state
  * @restore: restore CRTC state
- * @reset: reset CRTC after state has been invalidate (e.g. resume)
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
  * @cursor_set: setup the cursor
  * @cursor_move: move the cursor
  * @gamma_set: specify color ramp for CRTC
index c09511625a11244f8a07a9114952b9352f619f8d..8230b46fdd73ff916324a9d4d0dd064a13207502 100644 (file)
@@ -68,6 +68,10 @@ struct drm_fb_helper_funcs {
 
        int (*fb_probe)(struct drm_fb_helper *helper,
                        struct drm_fb_helper_surface_size *sizes);
+       bool (*initial_config)(struct drm_fb_helper *fb_helper,
+                              struct drm_fb_helper_crtc **crtcs,
+                              struct drm_display_mode **modes,
+                              bool *enabled, int width, int height);
 };
 
 struct drm_fb_helper_connector {
@@ -102,12 +106,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 int drm_fb_helper_set_par(struct fb_info *info);
 int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info);
-int drm_fb_helper_setcolreg(unsigned regno,
-                           unsigned red,
-                           unsigned green,
-                           unsigned blue,
-                           unsigned transp,
-                           struct fb_info *info);
 
 bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
index 0fbd046e7c93653b0c862623e4716fdcfef39465..9c8dca79808eb1770c1efc6d2832fa6de089cdd0 100644 (file)
@@ -902,6 +902,10 @@ extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
  * ttm_bo_util.c
  */
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                      struct ttm_mem_reg *mem);
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+                    struct ttm_mem_reg *mem);
 /**
  * ttm_bo_move_ttm
  *
index 76a87fb57ac258c4f004cee14737d8570da048f0..377cd8c3395eed02ac9f4dbadf7cac96dcf09de9 100644 (file)
@@ -141,11 +141,11 @@ typedef struct {
 } compat_sigset_t;
 
 struct compat_sigaction {
-#ifndef __ARCH_HAS_ODD_SIGACTION
+#ifndef __ARCH_HAS_IRIX_SIGACTION
        compat_uptr_t                   sa_handler;
        compat_ulong_t                  sa_flags;
 #else
-       compat_ulong_t                  sa_flags;
+       compat_uint_t                   sa_flags;
        compat_uptr_t                   sa_handler;
 #endif
 #ifdef __ARCH_HAS_SA_RESTORER
index a975de1ff59feaba6758afa9682ca9203bfc2c42..3bd46f766751caa9b143adcc5ce546441f6b868d 100644 (file)
@@ -51,7 +51,7 @@ struct task_struct;
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(void);
+extern void debug_check_no_locks_held(struct task_struct *task);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
 }
 
 static inline void
-debug_check_no_locks_held(void)
+debug_check_no_locks_held(struct task_struct *task)
 {
 }
 #endif
index 58b98606ac266fa86fdfe3a63c42b235a984e7b8..d49c60f5aa4c52250e5e5c9549ccb4fdf105d3ce 100644 (file)
@@ -501,6 +501,8 @@ struct fb_info {
                        resource_size_t size;
                } ranges[0];
        } *apertures;
+
+       bool skip_vt_switch; /* no VT switch on suspend/resume required */
 };
 
 static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
index 043a5cf8b5baf3e917dd9ba265c365a334365216..e70df40d84f6fe83c72f732aa44b454148661b6e 100644 (file)
@@ -3,7 +3,6 @@
 #ifndef FREEZER_H_INCLUDED
 #define FREEZER_H_INCLUDED
 
-#include <linux/debug_locks.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
@@ -49,8 +48,6 @@ extern void thaw_kernel_threads(void);
 
 static inline bool try_to_freeze(void)
 {
-       if (!(current->flags & PF_NOFREEZE))
-               debug_check_no_locks_held();
        might_sleep();
        if (likely(!freezing(current)))
                return false;
index 729eded4b24f09fa93b7f85b50aaca4b297fd2f9..2b93a9a5a1e6b8ef4a15a6aaf36ade3b48bd0d98 100644 (file)
@@ -50,4 +50,6 @@ static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
        spin_unlock(&fs->lock);
 }
 
+extern bool current_chrooted(void);
+
 #endif /* _LINUX_FS_STRUCT_H */
index 5b18ecde69b58be85c4fb39db00464de7d6885bf..1aa4f13cdfa6a4f2b0ceec56a689600f8d53626c 100644 (file)
@@ -106,6 +106,29 @@ enum max77693_muic_reg {
        MAX77693_MUIC_REG_END,
 };
 
+/* MAX77693 INTMASK1~2 Register */
+#define INTMASK1_ADC1K_SHIFT           3
+#define INTMASK1_ADCERR_SHIFT          2
+#define INTMASK1_ADCLOW_SHIFT          1
+#define INTMASK1_ADC_SHIFT             0
+#define INTMASK1_ADC1K_MASK            (1 << INTMASK1_ADC1K_SHIFT)
+#define INTMASK1_ADCERR_MASK           (1 << INTMASK1_ADCERR_SHIFT)
+#define INTMASK1_ADCLOW_MASK           (1 << INTMASK1_ADCLOW_SHIFT)
+#define INTMASK1_ADC_MASK              (1 << INTMASK1_ADC_SHIFT)
+
+#define INTMASK2_VIDRM_SHIFT           5
+#define INTMASK2_VBVOLT_SHIFT          4
+#define INTMASK2_DXOVP_SHIFT           3
+#define INTMASK2_DCDTMR_SHIFT          2
+#define INTMASK2_CHGDETRUN_SHIFT       1
+#define INTMASK2_CHGTYP_SHIFT          0
+#define INTMASK2_VIDRM_MASK            (1 << INTMASK2_VIDRM_SHIFT)
+#define INTMASK2_VBVOLT_MASK           (1 << INTMASK2_VBVOLT_SHIFT)
+#define INTMASK2_DXOVP_MASK            (1 << INTMASK2_DXOVP_SHIFT)
+#define INTMASK2_DCDTMR_MASK           (1 << INTMASK2_DCDTMR_SHIFT)
+#define INTMASK2_CHGDETRUN_MASK                (1 << INTMASK2_CHGDETRUN_SHIFT)
+#define INTMASK2_CHGTYP_MASK           (1 << INTMASK2_CHGTYP_SHIFT)
+
 /* MAX77693 MUIC - STATUS1~3 Register */
 #define STATUS1_ADC_SHIFT              (0)
 #define STATUS1_ADCLOW_SHIFT           (5)
index 7acc9dc73c9f272bda990e741041a1b5c1237cc5..e19ff30ad0a21453cece0df7524d0b336a69078f 100644 (file)
@@ -87,7 +87,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_PFNMAP      0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE   0x00000800      /* ETXTBSY on write attempts.. */
 
-#define VM_POPULATE     0x00001000
 #define VM_LOCKED      0x00002000
 #define VM_IO           0x00004000     /* Memory mapped I/O or similar */
 
index 61c7a87e5d2b358484dc16ab1b0db79253fc93b3..9aa863da287fedf383f3c507c287aa220cbd6a86 100644 (file)
@@ -79,8 +79,6 @@ calc_vm_flag_bits(unsigned long flags)
 {
        return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
               _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
-              ((flags & MAP_LOCKED) ? (VM_LOCKED | VM_POPULATE) : 0) |
-              (((flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE) ?
-                                                       VM_POPULATE : 0);
+              _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
 }
 #endif /* _LINUX_MMAN_H */
index d7029f4a191a093ef9294104c75756b6d3b7faee..73005f9957ead2b95adb4107329434afbbf482a7 100644 (file)
@@ -47,6 +47,8 @@ struct mnt_namespace;
 
 #define MNT_INTERNAL   0x4000
 
+#define MNT_LOCK_READONLY      0x400000
+
 struct vfsmount {
        struct dentry *mnt_root;        /* root of the mounted tree */
        struct super_block *mnt_sb;     /* pointer to superblock */
index f14943d55315695e36186a75c5db56f07407dd20..f80af86743425ca6219c8c2cfc5b180f2325c85b 100644 (file)
@@ -24,8 +24,8 @@
 #define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */
 #define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
 
-#define FB_SYNC_DATA_ENABLE_HIGH_ACT   (1 << 6)
-#define FB_SYNC_DOTCLK_FAILING_ACT     (1 << 7) /* failing/negtive edge sampling */
+#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT        (1 << 6)
+#define MXSFB_SYNC_DOTCLK_FAILING_ACT  (1 << 7) /* failing/negtive edge sampling */
 
 struct mxsfb_platform_data {
        struct fb_videomode *mode_list;
@@ -44,6 +44,9 @@ struct mxsfb_platform_data {
                                 * allocated. If specified,fb_size must also be specified.
                                 * fb_phys must be unused by Linux.
                                 */
+       u32 sync;               /* sync mask, contains MXSFB specifics not
+                                * carried in fb_info->var.sync
+                                */
 };
 
 #endif /* __LINUX_MXSFB_H */
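
The mxsfb change moves the controller-specific sync bits out of the generic fb sync flags and into a new platform-data field. A hypothetical board-file fragment; the mode definition is a placeholder and only fields visible in this patch are shown:

    #include <linux/fb.h>
    #include <linux/mxsfb.h>

    static struct fb_videomode example_modes[] = {
            { .name = "example-wvga", .refresh = 60, .xres = 800, .yres = 480 },
    };

    static struct mxsfb_platform_data example_mxsfb_pdata = {
            .mode_list = example_modes,
            /* MXSFB-specific bits now live in .sync, not in var.sync */
            .sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | MXSFB_SYNC_DOTCLK_FAILING_ACT,
    };
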
index b3d00fa4b3149b614365dd215a611ed638fdb924..8bfa95600e48bd11e2196571a39296a2fd7c0d5a 100644 (file)
@@ -895,7 +895,7 @@ struct netdev_fcoe_hbainfo {
  *
  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
- *                          struct net_device *dev)
+ *                          struct net_device *dev, u32 filter_mask)
  *
  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
  *     Called to change device carrier. Soft-devices (like dummy, team, etc)
index e5d7230332a4e4d9e369a0971aec86d7fdf36035..a224c7f5c377e796969a6d96b684d96b7779c9b5 100644 (file)
 extern void (*pm_power_off)(void);
 extern void (*pm_power_off_prepare)(void);
 
+struct device; /* we have a circular dep with device.h */
+#ifdef CONFIG_VT_CONSOLE_SLEEP
+extern void pm_vt_switch_required(struct device *dev, bool required);
+extern void pm_vt_switch_unregister(struct device *dev);
+#else
+static inline void pm_vt_switch_required(struct device *dev, bool required)
+{
+}
+static inline void pm_vt_switch_unregister(struct device *dev)
+{
+}
+#endif /* CONFIG_VT_CONSOLE_SLEEP */
+
 /*
  * Device power management
  */
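
pm_vt_switch_required()/pm_vt_switch_unregister() let a display driver tell the suspend core whether a console VT switch is still needed around suspend/resume (compare the new fb_info.skip_vt_switch flag earlier in this series). A hedged sketch of how a driver might use the pair; the probe/remove functions are illustrative, not taken from any in-tree driver:

    #include <linux/pm.h>
    #include <linux/platform_device.h>

    static int example_fb_probe(struct platform_device *pdev)
    {
            /* ... framebuffer setup elided ... */

            /* This driver restores the display itself on resume,
             * so no VT switch to a text console is required. */
            pm_vt_switch_required(&pdev->dev, false);
            return 0;
    }

    static int example_fb_remove(struct platform_device *pdev)
    {
            pm_vt_switch_unregister(&pdev->dev);
            return 0;
    }
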
index 2d8bdaef96116517cb8c6c1862a3cb82a3a863db..e96b9546c4c6ec68e26300c562bfd5620ba77e45 100644 (file)
@@ -235,13 +235,13 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
  * sg page iterator
  *
  * Iterates over sg entries page-by-page.  On each successful iteration,
- * @piter->page points to the current page, @piter->sg to the sg holding this
- * page and @piter->sg_pgoffset to the page's page offset within the sg. The
- * iteration will stop either when a maximum number of sg entries was reached
- * or a terminating sg (sg_last(sg) == true) was reached.
+ * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
+ * to get the current page and its dma address. @piter->sg will point to the
+ * sg holding this page and @piter->sg_pgoffset to the page's page offset
+ * within the sg. The iteration will stop either when a maximum number of sg
+ * entries was reached or a terminating sg (sg_last(sg) == true) was reached.
  */
 struct sg_page_iter {
-       struct page             *page;          /* current page */
        struct scatterlist      *sg;            /* sg holding the page */
        unsigned int            sg_pgoffset;    /* page offset within the sg */
 
@@ -255,6 +255,24 @@ bool __sg_page_iter_next(struct sg_page_iter *piter);
 void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset);
+/**
+ * sg_page_iter_page - get the current page held by the page iterator
+ * @piter:     page iterator holding the page
+ */
+static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
+{
+       return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+}
+
+/**
+ * sg_page_iter_dma_address - get the dma address of the current page held by
+ * the page iterator.
+ * @piter:     page iterator holding the page
+ */
+static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
+{
+       return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
+}
 
 /**
  * for_each_sg_page - iterate over the pages of the given sg list
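
With piter->page removed, the two new accessors above are the only way to get at the current page and its bus address while walking a scatterlist. A minimal sketch of the updated idiom, assuming the list has already been mapped with dma_map_sg():

    #include <linux/scatterlist.h>
    #include <linux/printk.h>

    static void example_dump_sg_pages(struct scatterlist *sgl, unsigned int nents)
    {
            struct sg_page_iter piter;

            for_each_sg_page(sgl, &piter, nents, 0) {
                    /* piter.page is gone; use the accessors instead */
                    struct page *page = sg_page_iter_page(&piter);
                    dma_addr_t dma = sg_page_iter_dma_address(&piter);

                    pr_debug("page %p dma %llx\n", page,
                             (unsigned long long)dma);
            }
    }
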
index a2dcb94ea49de76e81bc6c9d200b555d4cbbba04..9475c5cb28bcc82827e9ed8c540fa59acd5067e9 100644 (file)
@@ -250,11 +250,11 @@ extern int show_unhandled_signals;
 extern int sigsuspend(sigset_t *);
 
 struct sigaction {
-#ifndef __ARCH_HAS_ODD_SIGACTION
+#ifndef __ARCH_HAS_IRIX_SIGACTION
        __sighandler_t  sa_handler;
        unsigned long   sa_flags;
 #else
-       unsigned long   sa_flags;
+       unsigned int    sa_flags;
        __sighandler_t  sa_handler;
 #endif
 #ifdef __ARCH_HAS_SA_RESTORER
index f0bd7f90a90d45d3aeeb3aed8bc2d2f8295c8be0..e3c0ae9bb1faf876afca191701e481ecc30a4f1b 100644 (file)
@@ -44,7 +44,7 @@
 /* Adding event notification support elements */
 #define THERMAL_GENL_FAMILY_NAME                "thermal_event"
 #define THERMAL_GENL_VERSION                    0x01
-#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_group"
+#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_grp"
 
 /* Default Thermal Governor */
 #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
index 9d81de123c9017f4463ba4fa09a61b8b7425a8bf..42278bbf7a882d90360d141db9c8cc1b1eca1719 100644 (file)
@@ -68,6 +68,7 @@ struct udp_sock {
         * For encapsulation sockets.
         */
        int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+       void (*encap_destroy)(struct sock *sk);
 };
 
 static inline struct udp_sock *udp_sk(const struct sock *sk)
index 0a78df5f6cfd23d616ecd4572b6079d1f8605dd2..59694b5e5e906821a873d52a1a33b9c5d67af5d4 100644 (file)
@@ -357,6 +357,7 @@ struct hc_driver {
                 */
        int     (*disable_usb3_lpm_timeout)(struct usb_hcd *,
                        struct usb_device *, enum usb3_link_state state);
+       int     (*find_raw_port_number)(struct usb_hcd *, int);
 };
 
 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -396,6 +397,7 @@ extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd);
 extern int usb_add_hcd(struct usb_hcd *hcd,
                unsigned int irqnum, unsigned long irqflags);
 extern void usb_remove_hcd(struct usb_hcd *hcd);
+extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1);
 
 struct platform_device;
 extern void usb_hcd_platform_shutdown(struct platform_device *dev);
index 4ce009324933ebc2b048ebd5f834b0c1bf44e52d..b6b215f13b453091ff2be0fae11b37a5a51c0c84 100644 (file)
@@ -26,6 +26,8 @@ struct user_namespace {
        kuid_t                  owner;
        kgid_t                  group;
        unsigned int            proc_inum;
+       bool                    may_mount_sysfs;
+       bool                    may_mount_proc;
 };
 
 extern struct user_namespace init_user_ns;
@@ -82,4 +84,6 @@ static inline void put_user_ns(struct user_namespace *ns)
 
 #endif
 
+void update_mnt_policy(struct user_namespace *userns);
+
 #endif /* _LINUX_USER_H */
index 80461c1ae9efcceea9875000629f62608c6e699e..bb8271d487b7bea10861c6548dde50d0964c4a20 100644 (file)
@@ -9,6 +9,7 @@ struct flow_keys {
                __be32 ports;
                __be16 port16[2];
        };
+       u16 thoff;
        u8 ip_proto;
 };
 
index 68c69d54d39281ee033d765a1b9b66567c55d06f..fce8e6b66d558d8ff8bb629c4da5e358113a675e 100644 (file)
@@ -976,6 +976,7 @@ struct netns_ipvs {
        int                     sysctl_sync_retries;
        int                     sysctl_nat_icmp_send;
        int                     sysctl_pmtu_disc;
+       int                     sysctl_backup_only;
 
        /* ip_vs_lblc */
        int                     sysctl_lblc_expiration;
@@ -1067,6 +1068,12 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
        return ipvs->sysctl_pmtu_disc;
 }
 
+static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
+{
+       return ipvs->sync_state & IP_VS_STATE_BACKUP &&
+              ipvs->sysctl_backup_only;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1114,6 +1121,11 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
        return 1;
 }
 
+static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
+{
+       return 0;
+}
+
 #endif
 
 /*
index fd19625ff99db290fd289b3caf18f32c9b72ffc3..982141c15200d1fc60b144a2ca6ba53c0d320200 100644 (file)
@@ -77,15 +77,11 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb,
 {
        struct iphdr *iph = ip_hdr(skb);
 
-       if (iph->frag_off & htons(IP_DF))
-               iph->id = 0;
-       else {
-               /* Use inner packet iph-id if possible. */
-               if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
-                       iph->id = old_iph->id;
-               else
-                       __ip_select_ident(iph, dst,
-                                         (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-       }
+       /* Use inner packet iph-id if possible. */
+       if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
+               iph->id = old_iph->id;
+       else
+               __ip_select_ident(iph, dst,
+                                 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 }
 #endif
index 399162b50a8d3479e68183f63f8e227f8db5f6c5..e1379b4e8faf569c6f95131577bba6c4c392e2b0 100644 (file)
@@ -1074,7 +1074,8 @@ void fc_rport_terminate_io(struct fc_rport *);
 /*
  * DISCOVERY LAYER
  *****************************/
-int fc_disc_init(struct fc_lport *);
+void fc_disc_init(struct fc_lport *);
+void fc_disc_config(struct fc_lport *, void *);
 
 static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc)
 {
index ba99ce3f73722b2d5cee627838033ac8b84314ef..a042a957296d2923e37075fd502585b969617982 100644 (file)
@@ -8,6 +8,7 @@ header-y += i810_drm.h
 header-y += i915_drm.h
 header-y += mga_drm.h
 header-y += nouveau_drm.h
+header-y += qxl_drm.h
 header-y += r128_drm.h
 header-y += radeon_drm.h
 header-y += savage_drm.h
index 8d1e2bbee83a97153cfc5ec8251345cea7a10cba..73a99e4664bef6f033b155d11053c52e7635e1e5 100644 (file)
@@ -36,7 +36,7 @@
 #ifndef _DRM_H_
 #define _DRM_H_
 
-#if defined(__linux__)
+#if defined(__KERNEL__) || defined(__linux__)
 
 #include <linux/types.h>
 #include <asm/ioctl.h>
index 3d6301b6ec16d36b60cb4f69570170b8c804fb6d..090e5331ab7eabe0c6c779d4eb44d02ff558f78d 100644 (file)
@@ -367,13 +367,13 @@ struct drm_mode_mode_cmd {
  * depending on the value in flags different members are used.
  *
  * CURSOR_BO uses
- *    crtc
+ *    crtc_id
  *    width
  *    height
- *    handle - if 0 turns the cursor of
+ *    handle - if 0 turns the cursor off
  *
  * CURSOR_MOVE uses
- *    crtc
+ *    crtc_id
  *    x
  *    y
  */
diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h
new file mode 100644 (file)
index 0000000..ebebd36
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef QXL_DRM_H
+#define QXL_DRM_H
+
+#include <stddef.h>
+#include "drm/drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel
+ * compatibility Keep fields aligned to their size
+ */
+
+#define QXL_GEM_DOMAIN_CPU 0
+#define QXL_GEM_DOMAIN_VRAM 1
+#define QXL_GEM_DOMAIN_SURFACE 2
+
+#define DRM_QXL_ALLOC       0x00
+#define DRM_QXL_MAP         0x01
+#define DRM_QXL_EXECBUFFER  0x02
+#define DRM_QXL_UPDATE_AREA 0x03
+#define DRM_QXL_GETPARAM    0x04
+#define DRM_QXL_CLIENTCAP   0x05
+
+#define DRM_QXL_ALLOC_SURF  0x06
+
+struct drm_qxl_alloc {
+       uint32_t size;
+       uint32_t handle; /* 0 is an invalid handle */
+};
+
+struct drm_qxl_map {
+       uint64_t offset; /* use for mmap system call */
+       uint32_t handle;
+       uint32_t pad;
+};
+
+/*
+ * dest is the bo we are writing the relocation into
+ * src is bo we are relocating.
+ * *(dest_handle.base_addr + dest_offset) = physical_address(src_handle.addr +
+ * src_offset)
+ */
+#define QXL_RELOC_TYPE_BO 1
+#define QXL_RELOC_TYPE_SURF 2
+
+struct drm_qxl_reloc {
+       uint64_t src_offset; /* offset into src_handle or src buffer */
+       uint64_t dst_offset; /* offset in dest handle */
+       uint32_t src_handle; /* dest handle to compute address from */
+       uint32_t dst_handle; /* 0 if to command buffer */
+       uint32_t reloc_type;
+       uint32_t pad;
+};
+
+struct drm_qxl_command {
+       uint64_t         __user command; /* void* */
+       uint64_t         __user relocs; /* struct drm_qxl_reloc* */
+       uint32_t                type;
+       uint32_t                command_size;
+       uint32_t                relocs_num;
+       uint32_t                pad;
+};
+
+/* XXX: call it drm_qxl_commands? */
+struct drm_qxl_execbuffer {
+       uint32_t                flags;          /* for future use */
+       uint32_t                commands_num;
+       uint64_t         __user commands;       /* struct drm_qxl_command* */
+};
+
+struct drm_qxl_update_area {
+       uint32_t handle;
+       uint32_t top;
+       uint32_t left;
+       uint32_t bottom;
+       uint32_t right;
+       uint32_t pad;
+};
+
+#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
+#define QXL_PARAM_MAX_RELOCS 2
+struct drm_qxl_getparam {
+       uint64_t param;
+       uint64_t value;
+};
+
+/* these are one bit values */
+struct drm_qxl_clientcap {
+       uint32_t index;
+       uint32_t pad;
+};
+
+struct drm_qxl_alloc_surf {
+       uint32_t format;
+       uint32_t width;
+       uint32_t height;
+       int32_t stride;
+       uint32_t handle;
+       uint32_t pad;
+};
+
+#define DRM_IOCTL_QXL_ALLOC \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC, struct drm_qxl_alloc)
+
+#define DRM_IOCTL_QXL_MAP \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_MAP, struct drm_qxl_map)
+
+#define DRM_IOCTL_QXL_EXECBUFFER \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_EXECBUFFER,\
+               struct drm_qxl_execbuffer)
+
+#define DRM_IOCTL_QXL_UPDATE_AREA \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_UPDATE_AREA,\
+               struct drm_qxl_update_area)
+
+#define DRM_IOCTL_QXL_GETPARAM \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_GETPARAM,\
+               struct drm_qxl_getparam)
+
+#define DRM_IOCTL_QXL_CLIENTCAP \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_CLIENTCAP,\
+               struct drm_qxl_clientcap)
+
+#define DRM_IOCTL_QXL_ALLOC_SURF \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
+               struct drm_qxl_alloc_surf)
+
+#endif
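
The new ioctls go through the ordinary DRM command path, so user space reaches them with a plain ioctl() on the card node. A hedged user-space sketch querying QXL_PARAM_NUM_SURFACES; the device path is an assumption and error handling is trimmed:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <drm/qxl_drm.h>

    int main(void)
    {
            struct drm_qxl_getparam gp = { .param = QXL_PARAM_NUM_SURFACES };
            int fd = open("/dev/dri/card0", O_RDWR);  /* assumed QXL node */

            if (fd < 0)
                    return 1;
            if (ioctl(fd, DRM_IOCTL_QXL_GETPARAM, &gp) == 0)
                    printf("qxl: %llu surfaces\n", (unsigned long long)gp.value);
            close(fd);
            return 0;
    }
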
index 93f5fa94a431c167d70902ce6ded5a2275215b53..afafd703ad92b1595a5c7c326883fe89816f920f 100644 (file)
@@ -33,9 +33,11 @@ enum {
        PACKET_DIAG_TX_RING,
        PACKET_DIAG_FANOUT,
 
-       PACKET_DIAG_MAX,
+       __PACKET_DIAG_MAX,
 };
 
+#define PACKET_DIAG_MAX (__PACKET_DIAG_MAX - 1)
+
 struct packet_diag_info {
        __u32   pdi_index;
        __u32   pdi_version;
index b8a24941db21e82b11d80d0912c057a54bcf7742..b9e2a6a7446f077e528dd15fac8e8d97f0d9a232 100644 (file)
@@ -39,9 +39,11 @@ enum {
        UNIX_DIAG_MEMINFO,
        UNIX_DIAG_SHUTDOWN,
 
-       UNIX_DIAG_MAX,
+       __UNIX_DIAG_MAX,
 };
 
+#define UNIX_DIAG_MAX (__UNIX_DIAG_MAX - 1)
+
 struct unix_diag_vfs {
        __u32   udiag_vfs_ino;
        __u32   udiag_vfs_dev;
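
Both diag headers switch to the usual netlink convention: the enum ends in a private __*_DIAG_MAX and the exported *_DIAG_MAX is defined as one less, so the public maximum always names the last real attribute even if new ones are appended later. The point of the convention is attribute-array sizing and parsing limits, as in this illustrative kernel-side sketch (the nla_parse() signature of this era is assumed):

    #include <linux/unix_diag.h>
    #include <net/netlink.h>

    static int example_parse_unix_diag(struct nlattr *head, int len)
    {
            /* UNIX_DIAG_MAX is the last valid attribute type again,
             * so MAX + 1 slots cover every attribute exactly once. */
            struct nlattr *tb[UNIX_DIAG_MAX + 1];

            return nla_parse(tb, UNIX_DIAG_MAX, head, len, NULL);
    }
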
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
new file mode 100644 (file)
index 0000000..0c3b46d
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * Header containing platform_data structs for omap panels
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *        Archit Taneja <archit@ti.com>
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Mayuresh Janorkar <mayur@ti.com>
+ *
+ * Copyright (C) 2010 Canonical Ltd.
+ * Author: Bryan Wu <bryan.wu@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_PANEL_DATA_H
+#define __OMAP_PANEL_DATA_H
+
+struct omap_dss_device;
+
+/**
+ * struct panel_generic_dpi_data - panel driver configuration data
+ * @name: panel name
+ * @platform_enable: platform specific panel enable function
+ * @platform_disable: platform specific panel disable function
+ * @num_gpios: number of gpios connected to panel
+ * @gpios: gpio numbers on the platform
+ * @gpio_invert: configure gpio as active high or low
+ */
+struct panel_generic_dpi_data {
+       const char *name;
+       int (*platform_enable)(struct omap_dss_device *dssdev);
+       void (*platform_disable)(struct omap_dss_device *dssdev);
+
+       int num_gpios;
+       int gpios[10];
+       bool gpio_invert[10];
+};
+
+/**
+ * struct panel_n8x0_data - N800 panel driver configuration data
+ */
+struct panel_n8x0_data {
+       int (*platform_enable)(struct omap_dss_device *dssdev);
+       void (*platform_disable)(struct omap_dss_device *dssdev);
+       int panel_reset;
+       int ctrl_pwrdown;
+};
+
+/**
+ * struct nokia_dsi_panel_data - Nokia DSI panel driver configuration data
+ * @name: panel name
+ * @use_ext_te: use external TE
+ * @ext_te_gpio: external TE GPIO
+ * @esd_interval: interval of ESD checks, 0 = disabled (ms)
+ * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
+ * @use_dsi_backlight: true if panel uses DSI command to control backlight
+ * @pin_config: DSI pin configuration
+ */
+
+struct nokia_dsi_panel_data {
+       const char *name;
+
+       int reset_gpio;
+
+       bool use_ext_te;
+       int ext_te_gpio;
+
+       unsigned esd_interval;
+       unsigned ulps_timeout;
+
+       bool use_dsi_backlight;
+
+       struct omap_dsi_pin_config pin_config;
+};
+
+/**
+ * struct picodlp_panel_data - picodlp panel driver configuration data
+ * @picodlp_adapter_id:        i2c_adapter number for picodlp
+ */
+struct picodlp_panel_data {
+       int picodlp_adapter_id;
+       int emu_done_gpio;
+       int pwrgood_gpio;
+};
+
+/**
+ * struct tfp410_platform_data - tfp410 panel driver configuration data
+ * @i2c_bus_num: i2c bus id for the panel
+ * @power_down_gpio: gpio number for PD pin (or -1 if not available)
+ */
+struct tfp410_platform_data {
+       int i2c_bus_num;
+       int power_down_gpio;
+};
+
+/**
+ * sharp ls panel driver configuration data
+ * @resb_gpio: reset signal
+ * @ini_gpio: power on control
+ * @mo_gpio: selection for resolution(VGA/QVGA)
+ * @lr_gpio: selection for horizontal scanning direction
+ * @ud_gpio: selection for vertical scanning direction
+ */
+struct panel_sharp_ls037v7dw01_data {
+       int resb_gpio;
+       int ini_gpio;
+       int mo_gpio;
+       int lr_gpio;
+       int ud_gpio;
+};
+
+/**
+ * acx565akm panel driver configuration data
+ * @reset_gpio: reset signal
+ */
+struct panel_acx565akm_data {
+       int reset_gpio;
+};
+
+/**
+ * nec nl8048 panel driver configuration data
+ * @res_gpio: reset signal
+ * @qvga_gpio: selection for resolution(QVGA/WVGA)
+ */
+struct panel_nec_nl8048_data {
+       int res_gpio;
+       int qvga_gpio;
+};
+
+/**
+ * tpo td043 panel driver configuration data
+ * @nreset_gpio: reset signal
+ */
+struct panel_tpo_td043_data {
+       int nreset_gpio;
+};
+
+#endif /* __OMAP_PANEL_DATA_H */
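
The consolidated header carries the same per-panel platform-data structs that the deleted headers below used to provide, plus the new gpio array fields on panel_generic_dpi_data. A hypothetical board-file fragment; the panel name and gpio number are made up:

    #include <video/omap-panel-data.h>

    static struct panel_generic_dpi_data example_dpi_panel = {
            .name        = "generic",
            .num_gpios   = 1,
            .gpios       = { 101 },        /* panel enable gpio on this board */
            .gpio_invert = { false },      /* active high */
    };
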
diff --git a/include/video/omap-panel-generic-dpi.h b/include/video/omap-panel-generic-dpi.h
deleted file mode 100644 (file)
index 127e3f2..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Header for generic DPI panel driver
- *
- * Copyright (C) 2010 Canonical Ltd.
- * Author: Bryan Wu <bryan.wu@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP_PANEL_GENERIC_DPI_H
-#define __OMAP_PANEL_GENERIC_DPI_H
-
-struct omap_dss_device;
-
-/**
- * struct panel_generic_dpi_data - panel driver configuration data
- * @name: panel name
- * @platform_enable: platform specific panel enable function
- * @platform_disable: platform specific panel disable function
- */
-struct panel_generic_dpi_data {
-       const char *name;
-       int (*platform_enable)(struct omap_dss_device *dssdev);
-       void (*platform_disable)(struct omap_dss_device *dssdev);
-};
-
-#endif /* __OMAP_PANEL_GENERIC_DPI_H */
diff --git a/include/video/omap-panel-n8x0.h b/include/video/omap-panel-n8x0.h
deleted file mode 100644 (file)
index 50a1302..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __OMAP_PANEL_N8X0_H
-#define __OMAP_PANEL_N8X0_H
-
-struct omap_dss_device;
-
-struct panel_n8x0_data {
-       int (*platform_enable)(struct omap_dss_device *dssdev);
-       void (*platform_disable)(struct omap_dss_device *dssdev);
-       int panel_reset;
-       int ctrl_pwrdown;
-
-       int (*set_backlight)(struct omap_dss_device *dssdev, int level);
-};
-
-#endif
diff --git a/include/video/omap-panel-nokia-dsi.h b/include/video/omap-panel-nokia-dsi.h
deleted file mode 100644 (file)
index 04219a2..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __OMAP_NOKIA_DSI_PANEL_H
-#define __OMAP_NOKIA_DSI_PANEL_H
-
-struct omap_dss_device;
-
-/**
- * struct nokia_dsi_panel_data - Nokia DSI panel driver configuration
- * @name: panel name
- * @use_ext_te: use external TE
- * @ext_te_gpio: external TE GPIO
- * @esd_interval: interval of ESD checks, 0 = disabled (ms)
- * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
- * @use_dsi_backlight: true if panel uses DSI command to control backlight
- * @pin_config: DSI pin configuration
- */
-struct nokia_dsi_panel_data {
-       const char *name;
-
-       int reset_gpio;
-
-       bool use_ext_te;
-       int ext_te_gpio;
-
-       unsigned esd_interval;
-       unsigned ulps_timeout;
-
-       bool use_dsi_backlight;
-
-       struct omap_dsi_pin_config pin_config;
-};
-
-#endif /* __OMAP_NOKIA_DSI_PANEL_H */
diff --git a/include/video/omap-panel-picodlp.h b/include/video/omap-panel-picodlp.h
deleted file mode 100644 (file)
index 1c342ef..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * panel data for picodlp panel
- *
- * Copyright (C) 2011 Texas Instruments
- *
- * Author: Mayuresh Janorkar <mayur@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __PANEL_PICODLP_H
-#define __PANEL_PICODLP_H
-/**
- * struct : picodlp panel data
- * picodlp_adapter_id: i2c_adapter number for picodlp
- */
-struct picodlp_panel_data {
-       int picodlp_adapter_id;
-       int emu_done_gpio;
-       int pwrgood_gpio;
-};
-#endif /* __PANEL_PICODLP_H */
diff --git a/include/video/omap-panel-tfp410.h b/include/video/omap-panel-tfp410.h
deleted file mode 100644 (file)
index aef35e4..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Header for TFP410 chip driver
- *
- * Copyright (C) 2011 Texas Instruments Inc
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP_PANEL_TFP410_H
-#define __OMAP_PANEL_TFP410_H
-
-struct omap_dss_device;
-
-/**
- * struct tfp410_platform_data - panel driver configuration data
- * @i2c_bus_num: i2c bus id for the panel
- * @power_down_gpio: gpio number for PD pin (or -1 if not available)
- */
-struct tfp410_platform_data {
-       int i2c_bus_num;
-       int power_down_gpio;
-};
-
-#endif /* __OMAP_PANEL_TFP410_H */
index caefa093337d9da1d0ba440cdc98665d91bb1801..62ca9a77c1d65dff76823857e895f61280893a1b 100644 (file)
@@ -257,10 +257,31 @@ void rfbi_bus_unlock(void);
 
 /* DSI */
 
+enum omap_dss_dsi_trans_mode {
+       /* Sync Pulses: both sync start and end packets sent */
+       OMAP_DSS_DSI_PULSE_MODE,
+       /* Sync Events: only sync start packets sent */
+       OMAP_DSS_DSI_EVENT_MODE,
+       /* Burst: only sync start packets sent, pixels are time compressed */
+       OMAP_DSS_DSI_BURST_MODE,
+};
+
 struct omap_dss_dsi_videomode_timings {
+       unsigned long hsclk;
+
+       unsigned ndl;
+       unsigned bitspp;
+
+       /* pixels */
+       u16 hact;
+       /* lines */
+       u16 vact;
+
        /* DSI video mode blanking data */
        /* Unit: byte clock cycles */
+       u16 hss;
        u16 hsa;
+       u16 hse;
        u16 hfp;
        u16 hbp;
        /* Unit: line clocks */
@@ -274,14 +295,24 @@ struct omap_dss_dsi_videomode_timings {
        int hbp_blanking_mode;
        int hfp_blanking_mode;
 
-       /* Video port sync events */
-       bool vp_vsync_end;
-       bool vp_hsync_end;
+       enum omap_dss_dsi_trans_mode trans_mode;
 
        bool ddr_clk_always_on;
        int window_sync;
 };
 
+struct omap_dss_dsi_config {
+       enum omap_dss_dsi_mode mode;
+       enum omap_dss_dsi_pixel_format pixel_format;
+       const struct omap_video_timings *timings;
+
+       unsigned long hs_clk_min, hs_clk_max;
+       unsigned long lp_clk_min, lp_clk_max;
+
+       bool ddr_clk_always_on;
+       enum omap_dss_dsi_trans_mode trans_mode;
+};
+
 void dsi_bus_lock(struct omap_dss_device *dssdev);
 void dsi_bus_unlock(struct omap_dss_device *dssdev);
 int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
@@ -541,9 +572,14 @@ struct omap_dss_writeback_info {
 struct omap_dss_output {
        struct list_head list;
 
+       const char *name;
+
        /* display type supported by the output */
        enum omap_display_type type;
 
+       /* DISPC channel for this output */
+       enum omap_channel dispc_channel;
+
        /* output instance */
        enum omap_dss_output_id id;
 
@@ -561,6 +597,7 @@ struct omap_dss_device {
 
        enum omap_display_type type;
 
+       /* obsolete, to be removed */
        enum omap_channel channel;
 
        union {
@@ -590,41 +627,11 @@ struct omap_dss_device {
                } venc;
        } phy;
 
-       struct {
-               struct {
-                       struct {
-                               u16 lck_div;
-                               u16 pck_div;
-                               enum omap_dss_clk_source lcd_clk_src;
-                       } channel;
-
-                       enum omap_dss_clk_source dispc_fclk_src;
-               } dispc;
-
-               struct {
-                       /* regn is one greater than TRM's REGN value */
-                       u16 regn;
-                       u16 regm;
-                       u16 regm_dispc;
-                       u16 regm_dsi;
-
-                       u16 lp_clk_div;
-                       enum omap_dss_clk_source dsi_fclk_src;
-               } dsi;
-
-               struct {
-                       /* regn is one greater than TRM's REGN value */
-                       u16 regn;
-                       u16 regm2;
-               } hdmi;
-       } clocks;
-
        struct {
                struct omap_video_timings timings;
 
                enum omap_dss_dsi_pixel_format dsi_pix_fmt;
                enum omap_dss_dsi_mode dsi_mode;
-               struct omap_dss_dsi_videomode_timings dsi_vm_timings;
        } panel;
 
        struct {
@@ -829,15 +836,8 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
 void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
                bool enable);
 int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable);
-void omapdss_dsi_set_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings);
-void omapdss_dsi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h);
-void omapdss_dsi_set_pixel_format(struct omap_dss_device *dssdev,
-               enum omap_dss_dsi_pixel_format fmt);
-void omapdss_dsi_set_operation_mode(struct omap_dss_device *dssdev,
-               enum omap_dss_dsi_mode mode);
-void omapdss_dsi_set_videomode_timings(struct omap_dss_device *dssdev,
-               struct omap_dss_dsi_videomode_timings *timings);
+int omapdss_dsi_set_config(struct omap_dss_device *dssdev,
+               const struct omap_dss_dsi_config *config);
 
 int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
                void (*callback)(int, void *), void *data);
@@ -846,8 +846,6 @@ int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id);
 void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel);
 int omapdss_dsi_configure_pins(struct omap_dss_device *dssdev,
                const struct omap_dsi_pin_config *pin_cfg);
-int omapdss_dsi_set_clocks(struct omap_dss_device *dssdev,
-               unsigned long ddr_clk, unsigned long lp_clk);
 
 int omapdss_dsi_display_enable(struct omap_dss_device *dssdev);
 void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
index 01c3d62436ef8c44c4d0deffc3393bd2d02b8116..ffd4652de91ca390fff537f18f524300bf470246 100644 (file)
@@ -138,11 +138,21 @@ struct blkif_request_discard {
        uint8_t        _pad3;
 } __attribute__((__packed__));
 
+struct blkif_request_other {
+       uint8_t      _pad1;
+       blkif_vdev_t _pad2;        /* only for read/write requests         */
+#ifdef CONFIG_X86_64
+       uint32_t     _pad3;        /* offsetof(blkif_req..,u.other.id)==8*/
+#endif
+       uint64_t     id;           /* private guest value, echoed in resp  */
+} __attribute__((__packed__));
+
 struct blkif_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_request_rw rw;
                struct blkif_request_discard discard;
+               struct blkif_request_other other;
        } u;
 } __attribute__((__packed__));
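The extra 32-bit pad in the new `other` variant keeps `id` at byte offset 8 inside the packed request on 64-bit builds, in line with the rw and discard layouts. A small userspace mirror of that layout (hypothetical names, not the real Xen header) shows the arithmetic:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef uint16_t vdev_t;             /* stands in for blkif_vdev_t */

struct req_other {
	uint8_t  _pad1;
	vdev_t   _pad2;
#if defined(__x86_64__)              /* mirrors the CONFIG_X86_64 pad */
	uint32_t _pad3;
#endif
	uint64_t id;
} __attribute__((__packed__));

struct req {
	uint8_t operation;
	union {
		struct req_other other;
	} u;
} __attribute__((__packed__));

int main(void)
{
	/* Prints 8 on x86-64 (4 on 32-bit x86), matching the comment above. */
	printf("offsetof(u.other.id) = %zu\n", offsetof(struct req, u.other.id));
	return 0;
}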
 

index 1844d31f45520fef57c93be90ba7e247e1a0b042..7000bb1f6e96fb018fcc5360cf6266d567f6121e 100644 (file)
@@ -251,6 +251,12 @@ struct physdev_pci_device_add {
 
 #define PHYSDEVOP_pci_device_remove     26
 #define PHYSDEVOP_restore_msi_ext       27
+/*
+ * Dom0 should use these two to announce that MMIO resources assigned to
+ * MSI-X capable devices won't (prepare) or may (release) change.
+ */
+#define PHYSDEVOP_prepare_msix          30
+#define PHYSDEVOP_release_msix          31
 struct physdev_pci_device {
     /* IN */
     uint16_t seg;
index 3953fda2e8bd182edcc76f790523e48f2698b962..e4e47f64744635bbebe26fda491ff64eda8e4161 100644 (file)
@@ -330,8 +330,16 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type,
                         int flags, const char *dev_name,
                         void *data)
 {
-       if (!(flags & MS_KERNMOUNT))
-               data = current->nsproxy->ipc_ns;
+       if (!(flags & MS_KERNMOUNT)) {
+               struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+               /* Don't allow mounting unless the caller has CAP_SYS_ADMIN
+                * over the ipc namespace.
+                */
+               if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+                       return ERR_PTR(-EPERM);
+
+               data = ns;
+       }
        return mount_ns(fs_type, flags, data, mqueue_fill_super);
 }
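In effect, mounting mqueuefs now requires CAP_SYS_ADMIN in the user namespace that owns the caller's IPC namespace. A hypothetical userspace check (the mount point is only illustrative):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	/* "/dev/mqueue" is an illustrative mount point, not mandated by the patch. */
	if (mount("none", "/dev/mqueue", "mqueue", 0, NULL) == -1)
		fprintf(stderr, "mount: %s\n", strerror(errno));   /* EPERM without the capability */
	else
		puts("mqueue filesystem mounted");
	return 0;
}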
 
index 31cd1bf6af271771e67bdf6e29eb306371d3d130..fede1d06ef305cc59386bb4490c7e2e020b9f25d 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -872,6 +872,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
                                                        goto out_unlock;
                                                break;
                                        }
+                                       msg = ERR_PTR(-EAGAIN);
                                } else
                                        break;
                                msg_counter++;
index 51e485ca993560015dbb56701fd249637afc721c..60bc027c61c3a3be8d5bc26b024ffac0a79f1af9 100644 (file)
@@ -835,7 +835,7 @@ void do_exit(long code)
        /*
         * Make sure we are holding no locks:
         */
-       debug_check_no_locks_held();
+       debug_check_no_locks_held(tsk);
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
index 259db207b5d90806536da88963e7d1b469e1b943..8a0efac4f99de7c5d0711bea6792db1f385f66eb 100644 (file)
@@ -4088,7 +4088,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
-static void print_held_locks_bug(void)
+static void print_held_locks_bug(struct task_struct *curr)
 {
        if (!debug_locks_off())
                return;
@@ -4097,21 +4097,22 @@ static void print_held_locks_bug(void)
 
        printk("\n");
        printk("=====================================\n");
-       printk("[ BUG: %s/%d still has locks held! ]\n",
-              current->comm, task_pid_nr(current));
+       printk("[ BUG: lock held at task exit time! ]\n");
        print_kernel_ident();
        printk("-------------------------------------\n");
-       lockdep_print_held_locks(current);
+       printk("%s/%d is exiting with locks still held!\n",
+               curr->comm, task_pid_nr(curr));
+       lockdep_print_held_locks(curr);
+
        printk("\nstack backtrace:\n");
        dump_stack();
 }
 
-void debug_check_no_locks_held(void)
+void debug_check_no_locks_held(struct task_struct *task)
 {
-       if (unlikely(current->lockdep_depth > 0))
-               print_held_locks_bug();
+       if (unlikely(task->lockdep_depth > 0))
+               print_held_locks_bug(task);
 }
-EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
 void debug_show_all_locks(void)
 {
index c1c3dc1c60233f337a01ff13587f1a5a7f57f1cd..bea15bdf82b04c28d2f5d8c1a32a46b2621cb295 100644 (file)
@@ -181,6 +181,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
        int nr;
        int rc;
        struct task_struct *task, *me = current;
+       int init_pids = thread_group_leader(me) ? 1 : 2;
 
        /* Don't allow any more processes into the pid namespace */
        disable_pid_allocation(pid_ns);
@@ -230,7 +231,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
         */
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
-               if (pid_ns->nr_hashed == 1)
+               if (pid_ns->nr_hashed == init_pids)
                        break;
                schedule();
        }
index b1dc456474b57d80664560c202ce027cce73db13..463aa6736751a0a12d949ddeb93330b6067f053b 100644 (file)
@@ -4,6 +4,7 @@
  * Originally from swsusp.
  */
 
+#include <linux/console.h>
 #include <linux/vt_kern.h>
 #include <linux/kbd_kern.h>
 #include <linux/vt.h>
 
 static int orig_fgconsole, orig_kmsg;
 
+static DEFINE_MUTEX(vt_switch_mutex);
+
+struct pm_vt_switch {
+       struct list_head head;
+       struct device *dev;
+       bool required;
+};
+
+static LIST_HEAD(pm_vt_switch_list);
+
+
+/**
+ * pm_vt_switch_required - indicate VT switch at suspend requirements
+ * @dev: device
+ * @required: if true, caller needs VT switch at suspend/resume time
+ *
+ * The different console drivers may or may not require VT switches across
+ * suspend/resume, depending on how they handle restoring video state and
+ * what may be running.
+ *
+ * Drivers can indicate support for switchless suspend/resume, which can
+ * save time and flicker, by using this routine and passing 'false' as
+ * the argument.  If any loaded driver needs VT switching, or the
+ * no_console_suspend argument has been passed on the command line, VT
+ * switches will occur.
+ */
+void pm_vt_switch_required(struct device *dev, bool required)
+{
+       struct pm_vt_switch *entry, *tmp;
+
+       mutex_lock(&vt_switch_mutex);
+       list_for_each_entry(tmp, &pm_vt_switch_list, head) {
+               if (tmp->dev == dev) {
+                       /* already registered, update requirement */
+                       tmp->required = required;
+                       goto out;
+               }
+       }
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               goto out;
+
+       entry->required = required;
+       entry->dev = dev;
+
+       list_add(&entry->head, &pm_vt_switch_list);
+out:
+       mutex_unlock(&vt_switch_mutex);
+}
+EXPORT_SYMBOL(pm_vt_switch_required);
+
+/**
+ * pm_vt_switch_unregister - stop tracking a device's VT switching needs
+ * @dev: device
+ *
+ * Remove @dev from the vt switch list.
+ */
+void pm_vt_switch_unregister(struct device *dev)
+{
+       struct pm_vt_switch *tmp;
+
+       mutex_lock(&vt_switch_mutex);
+       list_for_each_entry(tmp, &pm_vt_switch_list, head) {
+               if (tmp->dev == dev) {
+                       list_del(&tmp->head);
+                       break;
+               }
+       }
+       mutex_unlock(&vt_switch_mutex);
+}
+EXPORT_SYMBOL(pm_vt_switch_unregister);
+
+/*
+ * There are three cases when a VT switch on suspend/resume is required:
+ *   1) no driver has indicated a requirement one way or another, so preserve
+ *      the old behavior
+ *   2) console suspend is disabled, we want to see debug messages across
+ *      suspend/resume
+ *   3) any registered driver indicates it needs a VT switch
+ *
+ * If none of these conditions is present, meaning we have at least one driver
+ * that doesn't need the switch, and none that do, we can avoid it to make
+ * resume look a little prettier (and suspend too, but that's usually hidden,
+ * e.g. when closing the lid on a laptop).
+ */
+static bool pm_vt_switch(void)
+{
+       struct pm_vt_switch *entry;
+       bool ret = true;
+
+       mutex_lock(&vt_switch_mutex);
+       if (list_empty(&pm_vt_switch_list))
+               goto out;
+
+       if (!console_suspend_enabled)
+               goto out;
+
+       list_for_each_entry(entry, &pm_vt_switch_list, head) {
+               if (entry->required)
+                       goto out;
+       }
+
+       ret = false;
+out:
+       mutex_unlock(&vt_switch_mutex);
+       return ret;
+}
+
 int pm_prepare_console(void)
 {
+       if (!pm_vt_switch())
+               return 0;
+
        orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1);
        if (orig_fgconsole < 0)
                return 1;
@@ -26,6 +139,9 @@ int pm_prepare_console(void)
 
 void pm_restore_console(void)
 {
+       if (!pm_vt_switch())
+               return;
+
        if (orig_fgconsole >= 0) {
                vt_move_to_console(orig_fgconsole, 0);
                vt_kmsg_redirect(orig_kmsg);
index 2fb8cb88df8d0296e655f320bc7d5cbce1a858c8..7f32fe0e52cd46489c8d90e4b85f9d74204aab16 100644 (file)
@@ -67,7 +67,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
  */
 int tick_check_broadcast_device(struct clock_event_device *dev)
 {
-       if ((tick_broadcast_device.evtdev &&
+       if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
+           (tick_broadcast_device.evtdev &&
             tick_broadcast_device.evtdev->rating >= dev->rating) ||
             (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;
index e81978e8c03b2b5f1a38d62248d8408fbfcd2d13..8e635a18ab521e48cfc4195cfb9f199de5e0c3b3 100644 (file)
@@ -51,6 +51,8 @@ struct user_namespace init_user_ns = {
        .owner = GLOBAL_ROOT_UID,
        .group = GLOBAL_ROOT_GID,
        .proc_inum = PROC_USER_INIT_INO,
+       .may_mount_sysfs = true,
+       .may_mount_proc = true,
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
 
index b14f4d3420439ca629be08494ef417e734513499..a54f26f82eb250a60c7f24ecc651c6c1bbf8cc88 100644 (file)
@@ -61,6 +61,15 @@ int create_user_ns(struct cred *new)
        kgid_t group = new->egid;
        int ret;
 
+       /*
+        * Verify that we cannot violate the file-access policy
+        * specified by the root directory, by verifying that the
+        * root directory is at the root of the mount namespace,
+        * which allows all files to be accessed.
+        */
+       if (current_chrooted())
+               return -EPERM;
+
        /* The creator needs a mapping in the parent user namespace
         * or else we won't be able to reasonably tell userspace who
         * created a user_namespace.
@@ -87,6 +96,8 @@ int create_user_ns(struct cred *new)
 
        set_cred_user_ns(new, ns);
 
+       update_mnt_policy(ns);
+
        return 0;
 }
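With this check in place, a chrooted process can no longer create a user namespace and thereby escape the file-access restrictions of its root. A small illustrative program (not part of the patch):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* Optionally chroot first (needs root): ./a.out /some/dir */
	if (argc > 1 && chroot(argv[1]) != 0) {
		perror("chroot");
		return 1;
	}

	if (unshare(CLONE_NEWUSER) != 0)
		/* Inside a chroot this now fails with EPERM. */
		fprintf(stderr, "unshare(CLONE_NEWUSER): %s\n", strerror(errno));
	else
		puts("created a new user namespace");
	return 0;
}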
 
index b83c144d731f3707402be43e83d2041ab4df82b7..a1cf8cae60e70dd298735f4feab09e55f8141e16 100644 (file)
@@ -401,7 +401,6 @@ void __sg_page_iter_start(struct sg_page_iter *piter,
        piter->__pg_advance = 0;
        piter->__nents = nents;
 
-       piter->page = NULL;
        piter->sg = sglist;
        piter->sg_pgoffset = pgoffset;
 }
@@ -426,7 +425,6 @@ bool __sg_page_iter_next(struct sg_page_iter *piter)
                if (!--piter->__nents || !piter->sg)
                        return false;
        }
-       piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
 
        return true;
 }
@@ -496,7 +494,7 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }
-       miter->page = miter->piter.page;
+       miter->page = sg_page_iter_page(&miter->piter);
        miter->consumed = miter->length = miter->__remaining;
 
        if (miter->__flags & SG_MITER_ATOMIC)
index 4723ac8d2fc200d5990f830b6f4c8c0fd2a9bbcf..87da3590c61e20287b4854e8212c178f96cb73c6 100644 (file)
@@ -204,10 +204,8 @@ get_write_lock:
                        unsigned long addr;
                        struct file *file = get_file(vma->vm_file);
 
-                       vm_flags = vma->vm_flags;
-                       if (!(flags & MAP_NONBLOCK))
-                               vm_flags |= VM_POPULATE;
-                       addr = mmap_region(file, start, size, vm_flags, pgoff);
+                       addr = mmap_region(file, start, size,
+                                       vma->vm_flags, pgoff);
                        fput(file);
                        if (IS_ERR_VALUE(addr)) {
                                err = addr;
@@ -226,12 +224,6 @@ get_write_lock:
                mutex_unlock(&mapping->i_mmap_mutex);
        }
 
-       if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) {
-               if (!has_write_lock)
-                       goto get_write_lock;
-               vma->vm_flags |= VM_POPULATE;
-       }
-
        if (vma->vm_flags & VM_LOCKED) {
                /*
                 * drop PG_Mlocked flag for over-mapped range
index 1c5e33fce6391ec1ea7569b7d324fc76aa9fa288..79b7cf7d1bca72cee9babfb60e21a38799c8eba1 100644 (file)
@@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
 
                newflags = vma->vm_flags & ~VM_LOCKED;
                if (on)
-                       newflags |= VM_LOCKED | VM_POPULATE;
+                       newflags |= VM_LOCKED;
 
                tmp = vma->vm_end;
                if (tmp > end)
@@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
                 * range with the first VMA. Also, skip undesirable VMA types.
                 */
                nend = min(end, vma->vm_end);
-               if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
-                   VM_POPULATE)
+               if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                        continue;
                if (nstart < vma->vm_start)
                        nstart = vma->vm_start;
@@ -492,9 +491,9 @@ static int do_mlockall(int flags)
        struct vm_area_struct * vma, * prev = NULL;
 
        if (flags & MCL_FUTURE)
-               current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
+               current->mm->def_flags |= VM_LOCKED;
        else
-               current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
+               current->mm->def_flags &= ~VM_LOCKED;
        if (flags == MCL_FUTURE)
                goto out;
 
@@ -503,7 +502,7 @@ static int do_mlockall(int flags)
 
                newflags = vma->vm_flags & ~VM_LOCKED;
                if (flags & MCL_CURRENT)
-                       newflags |= VM_LOCKED | VM_POPULATE;
+                       newflags |= VM_LOCKED;
 
                /* Ignore errors */
                mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
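With VM_POPULATE gone, mlock() and mlockall() only set VM_LOCKED again, and __mm_populate() simply skips VM_IO/VM_PFNMAP areas. A short userspace sketch of the user-visible behaviour (illustrative only, not from the patch):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
		perror("mlockall");   /* usually needs CAP_IPC_LOCK or a large RLIMIT_MEMLOCK */
		return 1;
	}

	/* Because of MCL_FUTURE this mapping is VM_LOCKED, and it is
	 * populated while the mmap() call itself runs. */
	char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, 1 << 20);

	munmap(p, 1 << 20);
	munlockall();
	return 0;
}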
index 2664a47cec93721d6c73aafa85f32c11a5fb13e6..0db0de1c2fbee21e0e9919af66021b190d789731 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        }
 
        addr = mmap_region(file, addr, len, vm_flags, pgoff);
-       if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE))
+       if (!IS_ERR_VALUE(addr) &&
+           ((vm_flags & VM_LOCKED) ||
+            (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
                *populate = len;
        return addr;
 }
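The populate decision is once again derived from the caller's flags: prefault when the mapping is VM_LOCKED, or when MAP_POPULATE is given without MAP_NONBLOCK. From userspace that looks like this (illustrative sketch, not from the patch):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 << 20;

	/* Prefaulted while mmap() runs: later first-touch faults are avoided. */
	char *eager = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);

	/* MAP_NONBLOCK cancels the prefault, so pages arrive lazily again. */
	char *lazy = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | MAP_NONBLOCK,
			  -1, 0);

	if (eager == MAP_FAILED || lazy == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	munmap(eager, len);
	munmap(lazy, len);
	return 0;
}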
@@ -1938,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
        /* Check the cache first. */
        /* (Cache hit rate is typically around 35%.) */
-       vma = mm->mmap_cache;
+       vma = ACCESS_ONCE(mm->mmap_cache);
        if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
                struct rb_node *rb_node;
 
index e19328087534af9f77371866c9afe30c70e3ff89..2f3ea749c3184057559cc06ef60960d39bb4bac2 100644 (file)
@@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
        struct vm_area_struct *vma;
 
        /* check the cache first */
-       vma = mm->mmap_cache;
+       vma = ACCESS_ONCE(mm->mmap_cache);
        if (vma && vma->vm_start <= addr && vma->vm_end > addr)
                return vma;
 
index a18714469bf791299f0d60b1a38b9886fdf4dfff..85addcd9372b0d8abfbd277d332408900efdea1d 100644 (file)
@@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
        grp = &vlan_info->grp;
 
-       /* Take it out of our own structures, but be sure to interlock with
-        * HW accelerating devices or SW vlan input packet processing if
-        * VLAN is not 0 (leave it there for 802.1p).
-        */
-       if (vlan_id)
-               vlan_vid_del(real_dev, vlan_id);
-
        grp->nr_vlan_devs--;
 
        if (vlan->flags & VLAN_FLAG_MVRP)
@@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
                vlan_gvrp_uninit_applicant(real_dev);
        }
 
+       /* Take it out of our own structures, but be sure to interlock with
+        * HW accelerating devices or SW vlan input packet processing if
+        * VLAN is not 0 (leave it there for 802.1p).
+        */
+       if (vlan_id)
+               vlan_vid_del(real_dev, vlan_id);
+
        /* Get rid of the vlan's reference to real_dev */
        dev_put(real_dev);
 }
index 79d87d8d4f514f5529b57f1b89119a735d068f69..fad0302bdb325e5df700edf4fe323c6226216599 100644 (file)
@@ -359,6 +359,7 @@ static void __sco_sock_close(struct sock *sk)
                        sco_chan_del(sk, ECONNRESET);
                break;
 
+       case BT_CONNECT2:
        case BT_CONNECT:
        case BT_DISCONN:
                sco_chan_del(sk, ECONNRESET);
index b0812c91c0f0ea6d4010722990a044c72d93ba36..bab338e6270df4604efe792f40025a40d7682347 100644 (file)
@@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                        return 0;
                br_warn(br, "adding interface %s with same address "
                       "as a received packet\n",
-                      source->dev->name);
+                      source ? source->dev->name : br->dev->name);
                fdb_delete(br, fdb);
        }
 
index d540ced1f6c66dc5b84a8ef06c24199e747a0f86..13e6447f03987b49b32695bec2f4e50684e78115 100644 (file)
@@ -1545,7 +1545,6 @@ void net_enable_timestamp(void)
                return;
        }
 #endif
-       WARN_ON(in_interrupt());
        static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1625,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
        }
 
        skb_orphan(skb);
-       nf_reset(skb);
 
        if (unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
@@ -3315,6 +3313,7 @@ int netdev_rx_handler_register(struct net_device *dev,
        if (dev->rx_handler)
                return -EBUSY;
 
+       /* Note: rx_handler_data must be set before rx_handler */
        rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
        rcu_assign_pointer(dev->rx_handler, rx_handler);
 
@@ -3335,6 +3334,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 
        ASSERT_RTNL();
        RCU_INIT_POINTER(dev->rx_handler, NULL);
+       /* A reader seeing a non-NULL rx_handler inside an rcu_read_lock()
+        * section is guaranteed to also see a non-NULL rx_handler_data.
+        */
+       synchronize_net();
        RCU_INIT_POINTER(dev->rx_handler_data, NULL);
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
index c56ea6f7f6c7c88acbe602a3470c979622345713..2bfd081c59f7706cbd9d8a5ba8a5780aaaae23d1 100644 (file)
@@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
        struct flow_flush_info *info = data;
        struct tasklet_struct *tasklet;
 
-       tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
+       tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
 }
index 9d4c7201400d154ab2fd00ccfab4f22f8bddb5ff..e187bf06d673b0ec37d401a8389bba372a8fb3d9 100644 (file)
@@ -140,6 +140,8 @@ ipv6:
                        flow->ports = *ports;
        }
 
+       flow->thoff = (u16) nhoff;
+
        return true;
 }
 EXPORT_SYMBOL(skb_flow_dissect);
index 5fb8d7e472941fede3595a3a4032cfc21fbe849b..b65441da74abd85cc8deacc7650c583013e9dcfb 100644 (file)
@@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
        }
        if (ops->fill_info) {
                data = nla_nest_start(skb, IFLA_INFO_DATA);
-               if (data == NULL)
+               if (data == NULL) {
+                       err = -EMSGSIZE;
                        goto err_cancel_link;
+               }
                err = ops->fill_info(skb, dev);
                if (err < 0)
                        goto err_cancel_data;
index 905dcc6ad1e3b480c01f87df5157f4e37de112a1..2dc6cdaaae8abc5f31afa57a7ccf765cc978b6ac 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/security.h>
+#include <linux/pid_namespace.h>
 #include <linux/pid.h>
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
@@ -52,7 +53,8 @@ static __inline__ int scm_check_creds(struct ucred *creds)
        if (!uid_valid(uid) || !gid_valid(gid))
                return -EINVAL;
 
-       if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) &&
+       if ((creds->pid == task_tgid_vnr(current) ||
+            ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
            ((uid_eq(uid, cred->uid)   || uid_eq(uid, cred->euid) ||
              uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
            ((gid_eq(gid, cred->gid)   || gid_eq(gid, cred->egid) ||
index 68f6a94f7661999095d9a02539aee956cd52e8cc..c929d9c1c4b60d719d60066f7ca06e4b7d296f59 100644 (file)
@@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                iph->frag_off |= htons(IP_MF);
                        offset += (skb->len - skb->mac_len - iph->ihl * 4);
                } else  {
-                       if (!(iph->frag_off & htons(IP_DF)))
-                               iph->id = htons(id++);
+                       iph->id = htons(id++);
                }
                iph->tot_len = htons(skb->len - skb->mac_len);
                iph->check = 0;
index 98cbc6877019428f7d3400632c1e81e50145a814..bf6c5cf31aed27ccba4aa25c4d049f0cc037e3cb 100644 (file)
@@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void)
                }
        for (i++; i < CONF_NAMESERVERS_MAX; i++)
                if (ic_nameservers[i] != NONE)
-                       pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]);
+                       pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
+       pr_cont("\n");
 #endif /* !SILENT */
 
        return 0;
index ce2d43e1f09f6b4983e6635e6c2b48ca56fcf18b..0d755c50994b2ca01438158e033c020810a3e8af 100644 (file)
@@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT
 
          If unsure, say Y.
 
-config IP_NF_QUEUE
-       tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
-       depends on NETFILTER_ADVANCED
-       help
-         Netfilter has the ability to queue packets to user space: the
-         netlink device can be used to access them using this driver.
-
-         This option enables the old IPv4-only "ip_queue" implementation
-         which has been obsoleted by the new "nfnetlink_queue" code (see
-         CONFIG_NETFILTER_NETLINK_QUEUE).
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_IPTABLES
        tristate "IP tables support (required for filtering/masq/NAT)"
        default m if NETFILTER_ADVANCED=n
index 0d9bdacce99f46a77982d89c86a7764fe9ef15f8..3bd55bad230ac7f822f4ea75eb280bc8ce0c07fc 100644 (file)
@@ -2059,11 +2059,8 @@ void tcp_enter_loss(struct sock *sk, int how)
        if (tcp_is_reno(tp))
                tcp_reset_reno_sack(tp);
 
-       if (!how) {
-               /* Push undo marker, if it was plain RTO and nothing
-                * was retransmitted. */
-               tp->undo_marker = tp->snd_una;
-       } else {
+       tp->undo_marker = tp->snd_una;
+       if (how) {
                tp->sacked_out = 0;
                tp->fackets_out = 0;
        }
index 817fbb396bc80077e23b03358355aca6ece6deba..5d0b4387cba6df401166a48f1e4cf3800d6ce6ff 100644 (file)
@@ -1809,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
                        goto send_now;
        }
 
-       /* Ok, it looks like it is advisable to defer.  */
-       tp->tso_deferred = 1 | (jiffies << 1);
+       /* Ok, it looks like it is advisable to defer.
+        * Do not rearm the timer if it is already set, so as not to
+        * break TCP ACK clocking.
+        */
+       if (!tp->tso_deferred)
+               tp->tso_deferred = 1 | (jiffies << 1);
 
        return true;
 
index 265c42cf963c30cce55016bbf4ef0a13f57e6ec5..0a073a263720c4eab823db2c09b1b18d7b510bcd 100644 (file)
@@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
+       struct udp_sock *up = udp_sk(sk);
        bool slow = lock_sock_fast(sk);
        udp_flush_pending_frames(sk);
        unlock_sock_fast(sk, slow);
+       if (static_key_false(&udp_encap_needed) && up->encap_type) {
+               void (*encap_destroy)(struct sock *sk);
+               encap_destroy = ACCESS_ONCE(up->encap_destroy);
+               if (encap_destroy)
+                       encap_destroy(sk);
+       }
 }
 
 /*
index f2c7e615f902d861d99529536ecebff35fbd7fd5..a459c4f5b76914e031798a4b90b3df1998f47a08 100644 (file)
@@ -2529,6 +2529,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
 static void init_loopback(struct net_device *dev)
 {
        struct inet6_dev  *idev;
+       struct net_device *sp_dev;
+       struct inet6_ifaddr *sp_ifa;
+       struct rt6_info *sp_rt;
 
        /* ::1 */
 
@@ -2540,6 +2543,30 @@ static void init_loopback(struct net_device *dev)
        }
 
        add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
+
+       /* Add routes to other interfaces' IPv6 addresses */
+       for_each_netdev(dev_net(dev), sp_dev) {
+               if (!strcmp(sp_dev->name, dev->name))
+                       continue;
+
+               idev = __in6_dev_get(sp_dev);
+               if (!idev)
+                       continue;
+
+               read_lock_bh(&idev->lock);
+               list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
+
+                       if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
+                               continue;
+
+                       sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+
+                       /* Failure cases are ignored */
+                       if (!IS_ERR(sp_rt))
+                               ip6_ins_rt(sp_rt);
+               }
+               read_unlock_bh(&idev->lock);
+       }
 }
 
 static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
@@ -4784,26 +4811,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
 
 static int __net_init addrconf_init_net(struct net *net)
 {
-       int err;
+       int err = -ENOMEM;
        struct ipv6_devconf *all, *dflt;
 
-       err = -ENOMEM;
-       all = &ipv6_devconf;
-       dflt = &ipv6_devconf_dflt;
+       all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
+       if (all == NULL)
+               goto err_alloc_all;
 
-       if (!net_eq(net, &init_net)) {
-               all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
-               if (all == NULL)
-                       goto err_alloc_all;
+       dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
+       if (dflt == NULL)
+               goto err_alloc_dflt;
 
-               dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
-               if (dflt == NULL)
-                       goto err_alloc_dflt;
-       } else {
-               /* these will be inherited by all namespaces */
-               dflt->autoconf = ipv6_defaults.autoconf;
-               dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
-       }
+       /* these will be inherited by all namespaces */
+       dflt->autoconf = ipv6_defaults.autoconf;
+       dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
 
        net->ipv6.devconf_all = all;
        net->ipv6.devconf_dflt = dflt;
index e33fe0ab2568ec5a750e846457ff52a6a3395c86..2bab2aa597450813ae4bd60d362998830a7e7e3b 100644 (file)
@@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
            ipv6_addr_loopback(&hdr->daddr))
                goto err;
 
+       /* RFC4291 Errata ID: 3480
+        * Interface-Local scope spans only a single interface on a
+        * node and is useful only for loopback transmission of
+        * multicast.  Packets with interface-local scope received
+        * from another node must be discarded.
+        */
+       if (!(skb->pkt_type == PACKET_LOOPBACK ||
+             dev->flags & IFF_LOOPBACK) &&
+           ipv6_addr_is_multicast(&hdr->daddr) &&
+           IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
+               goto err;
+
        /* RFC4291 2.7
         * Nodes must not originate a packet to a multicast address whose scope
         * field contains the reserved value 0; if such a packet is received, it
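The scope being rejected is the low nibble of the address's second byte, i.e. ff01::/16 traffic. A quick userspace check of that nibble (illustrative only):

#include <stdio.h>
#include <arpa/inet.h>

static int mc_scope(const struct in6_addr *a)
{
	return a->s6_addr[1] & 0x0f;   /* same test as IPV6_ADDR_MC_SCOPE() */
}

int main(void)
{
	struct in6_addr addr;

	inet_pton(AF_INET6, "ff01::1", &addr);
	printf("ff01::1 scope = %d (interface-local, now dropped off-box)\n",
	       mc_scope(&addr));

	inet_pton(AF_INET6, "ff02::1", &addr);
	printf("ff02::1 scope = %d (link-local, still accepted)\n",
	       mc_scope(&addr));
	return 0;
}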
index 83acc1405a18dcef218625e8517431978393ac12..33608c610276d87e9845b75555fadc6771765606 100644 (file)
@@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
        {
                .name           = "SNPT",
+               .table          = "mangle",
                .target         = ip6t_snpt_tg,
                .targetsize     = sizeof(struct ip6t_npt_tginfo),
                .checkentry     = ip6t_npt_checkentry,
@@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
        },
        {
                .name           = "DNPT",
+               .table          = "mangle",
                .target         = ip6t_dnpt_tg,
                .targetsize     = sizeof(struct ip6t_npt_tginfo),
                .checkentry     = ip6t_npt_checkentry,
index 599e1ba6d1ceaa6766d53eda020a69a0aa234e5b..d8e5e852fc7a08b26879c81a4ecaad0be2cf2f90 100644 (file)
@@ -1285,10 +1285,18 @@ do_confirm:
 
 void udpv6_destroy_sock(struct sock *sk)
 {
+       struct udp_sock *up = udp_sk(sk);
        lock_sock(sk);
        udp_v6_flush_pending_frames(sk);
        release_sock(sk);
 
+       if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
+               void (*encap_destroy)(struct sock *sk);
+               encap_destroy = ACCESS_ONCE(up->encap_destroy);
+               if (encap_destroy)
+                       encap_destroy(sk);
+       }
+
        inet6_destroy_sock(sk);
 }
 
index d07e3a626446b3cab44e68b019f7870619699f75..d28e7f014cc639779a4557203b4b2ba2f7e63927 100644 (file)
@@ -2583,8 +2583,10 @@ bed:
                                    NULL, NULL, NULL);
 
                /* Check if we got some results */
-               if (!self->cachedaddr)
-                       return -EAGAIN;         /* Didn't find any devices */
+               if (!self->cachedaddr) {
+                       err = -EAGAIN;          /* Didn't find any devices */
+                       goto out;
+               }
                daddr = self->cachedaddr;
                /* Cleanup */
                self->cachedaddr = 0;
index 8555f331ea60d4bca67cbe91390ed077f62a091e..5b1e5af257137e4c6a03a2c575f1adb5a949e25e 100644 (file)
@@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c)
        hdr->sadb_msg_pid = c->portid;
        hdr->sadb_msg_version = PF_KEY_V2;
        hdr->sadb_msg_errno = (uint8_t) 0;
+       hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
        pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
        return 0;
index d36875f3427e82b7795012bebe8281318f983d22..8aecf5df66569a6ca0ae1b8921e20954d6812336 100644 (file)
@@ -114,7 +114,6 @@ struct l2tp_net {
 
 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
-static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
 
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
@@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
        } else {
                /* Socket is owned by kernelspace */
                sk = tunnel->sock;
+               sock_hold(sk);
        }
 
 out:
@@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk)
                }
                sock_put(sk);
        }
+       sock_put(sk);
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
 
@@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
        struct sk_buff *skbp;
        struct sk_buff *tmp;
        u32 ns = L2TP_SKB_CB(skb)->ns;
-       struct l2tp_stats *sstats;
 
        spin_lock_bh(&session->reorder_q.lock);
-       sstats = &session->stats;
        skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
                if (L2TP_SKB_CB(skbp)->ns > ns) {
                        __skb_queue_before(&session->reorder_q, skbp, skb);
@@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
                                 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
                                 session->name, ns, L2TP_SKB_CB(skbp)->ns,
                                 skb_queue_len(&session->reorder_q));
-                       u64_stats_update_begin(&sstats->syncp);
-                       sstats->rx_oos_packets++;
-                       u64_stats_update_end(&sstats->syncp);
+                       atomic_long_inc(&session->stats.rx_oos_packets);
                        goto out;
                }
        }
@@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
 {
        struct l2tp_tunnel *tunnel = session->tunnel;
        int length = L2TP_SKB_CB(skb)->length;
-       struct l2tp_stats *tstats, *sstats;
 
        /* We're about to requeue the skb, so return resources
         * to its current owner (a socket receive buffer).
         */
        skb_orphan(skb);
 
-       tstats = &tunnel->stats;
-       u64_stats_update_begin(&tstats->syncp);
-       sstats = &session->stats;
-       u64_stats_update_begin(&sstats->syncp);
-       tstats->rx_packets++;
-       tstats->rx_bytes += length;
-       sstats->rx_packets++;
-       sstats->rx_bytes += length;
-       u64_stats_update_end(&tstats->syncp);
-       u64_stats_update_end(&sstats->syncp);
+       atomic_long_inc(&tunnel->stats.rx_packets);
+       atomic_long_add(length, &tunnel->stats.rx_bytes);
+       atomic_long_inc(&session->stats.rx_packets);
+       atomic_long_add(length, &session->stats.rx_bytes);
 
        if (L2TP_SKB_CB(skb)->has_seq) {
                /* Bump our Nr */
@@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
 {
        struct sk_buff *skb;
        struct sk_buff *tmp;
-       struct l2tp_stats *sstats;
 
        /* If the pkt at the head of the queue has the nr that we
         * expect to send up next, dequeue it and any other
@@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
         */
 start:
        spin_lock_bh(&session->reorder_q.lock);
-       sstats = &session->stats;
        skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
                if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
-                       u64_stats_update_begin(&sstats->syncp);
-                       sstats->rx_seq_discards++;
-                       sstats->rx_errors++;
-                       u64_stats_update_end(&sstats->syncp);
+                       atomic_long_inc(&session->stats.rx_seq_discards);
+                       atomic_long_inc(&session->stats.rx_errors);
                        l2tp_dbg(session, L2TP_MSG_SEQ,
                                 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
                                 session->name, L2TP_SKB_CB(skb)->ns,
@@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
        struct l2tp_tunnel *tunnel = session->tunnel;
        int offset;
        u32 ns, nr;
-       struct l2tp_stats *sstats = &session->stats;
 
        /* The ref count is increased since we now hold a pointer to
         * the session. Take care to decrement the refcnt when exiting
@@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                                  "%s: cookie mismatch (%u/%u). Discarding.\n",
                                  tunnel->name, tunnel->tunnel_id,
                                  session->session_id);
-                       u64_stats_update_begin(&sstats->syncp);
-                       sstats->rx_cookie_discards++;
-                       u64_stats_update_end(&sstats->syncp);
+                       atomic_long_inc(&session->stats.rx_cookie_discards);
                        goto discard;
                }
                ptr += session->peer_cookie_len;
@@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                        l2tp_warn(session, L2TP_MSG_SEQ,
                                  "%s: recv data has no seq numbers when required. Discarding.\n",
                                  session->name);
-                       u64_stats_update_begin(&sstats->syncp);
-                       sstats->rx_seq_discards++;
-                       u64_stats_update_end(&sstats->syncp);
+                       atomic_long_inc(&session->stats.rx_seq_discards);
                        goto discard;
                }
 
@@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                        l2tp_warn(session, L2TP_MSG_SEQ,
                                  "%s: recv data has no seq numbers when required. Discarding.\n",
                                  session->name);
-                       u64_stats_update_begin(&sstats->syncp);
-                       sstats->rx_seq_discards++;
-                       u64_stats_update_end(&sstats->syncp);
+                       atomic_long_inc(&session->stats.rx_seq_discards);
                        goto discard;
                }
        }
@@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                         * packets
                         */
                        if (L2TP_SKB_CB(skb)->ns != session->nr) {
-                               u64_stats_update_begin(&sstats->syncp);
-                               sstats->rx_seq_discards++;
-                               u64_stats_update_end(&sstats->syncp);
+                               atomic_long_inc(&session->stats.rx_seq_discards);
                                l2tp_dbg(session, L2TP_MSG_SEQ,
                                         "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
                                         session->name, L2TP_SKB_CB(skb)->ns,
@@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
        return;
 
 discard:
-       u64_stats_update_begin(&sstats->syncp);
-       sstats->rx_errors++;
-       u64_stats_update_end(&sstats->syncp);
+       atomic_long_inc(&session->stats.rx_errors);
        kfree_skb(skb);
 
        if (session->deref)
@@ -828,6 +803,23 @@ discard:
 }
 EXPORT_SYMBOL(l2tp_recv_common);
 
+/* Drop skbs from the session's reorder_q
+ */
+int l2tp_session_queue_purge(struct l2tp_session *session)
+{
+       struct sk_buff *skb = NULL;
+       BUG_ON(!session);
+       BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+       while ((skb = skb_dequeue(&session->reorder_q))) {
+               atomic_long_inc(&session->stats.rx_errors);
+               kfree_skb(skb);
+               if (session->deref)
+                       (*session->deref)(session);
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
+
 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
  * here. The skb is not on a list when we get here.
  * Returns 0 if the packet was a data packet and was successfully passed on.
@@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
        u32 tunnel_id, session_id;
        u16 version;
        int length;
-       struct l2tp_stats *tstats;
 
        if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
                goto discard_bad_csum;
@@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 discard_bad_csum:
        LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
        UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
-       tstats = &tunnel->stats;
-       u64_stats_update_begin(&tstats->syncp);
-       tstats->rx_errors++;
-       u64_stats_update_end(&tstats->syncp);
+       atomic_long_inc(&tunnel->stats.rx_errors);
        kfree_skb(skb);
 
        return 0;
@@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
        struct l2tp_tunnel *tunnel = session->tunnel;
        unsigned int len = skb->len;
        int error;
-       struct l2tp_stats *tstats, *sstats;
 
        /* Debug */
        if (session->send_seq)
@@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
                error = ip_queue_xmit(skb, fl);
 
        /* Update stats */
-       tstats = &tunnel->stats;
-       u64_stats_update_begin(&tstats->syncp);
-       sstats = &session->stats;
-       u64_stats_update_begin(&sstats->syncp);
        if (error >= 0) {
-               tstats->tx_packets++;
-               tstats->tx_bytes += len;
-               sstats->tx_packets++;
-               sstats->tx_bytes += len;
+               atomic_long_inc(&tunnel->stats.tx_packets);
+               atomic_long_add(len, &tunnel->stats.tx_bytes);
+               atomic_long_inc(&session->stats.tx_packets);
+               atomic_long_add(len, &session->stats.tx_bytes);
        } else {
-               tstats->tx_errors++;
-               sstats->tx_errors++;
+               atomic_long_inc(&tunnel->stats.tx_errors);
+               atomic_long_inc(&session->stats.tx_errors);
        }
-       u64_stats_update_end(&tstats->syncp);
-       u64_stats_update_end(&sstats->syncp);
 
        return 0;
 }
@@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
                /* No longer an encapsulation socket. See net/ipv4/udp.c */
                (udp_sk(sk))->encap_type = 0;
                (udp_sk(sk))->encap_rcv = NULL;
+               (udp_sk(sk))->encap_destroy = NULL;
                break;
        case L2TP_ENCAPTYPE_IP:
                break;
@@ -1311,7 +1293,7 @@ end:
 
 /* When the tunnel is closed, all the attached sessions need to go too.
  */
-static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
 {
        int hash;
        struct hlist_node *walk;
@@ -1334,25 +1316,13 @@ again:
 
                        hlist_del_init(&session->hlist);
 
-                       /* Since we should hold the sock lock while
-                        * doing any unbinding, we need to release the
-                        * lock we're holding before taking that lock.
-                        * Hold a reference to the sock so it doesn't
-                        * disappear as we're jumping between locks.
-                        */
                        if (session->ref != NULL)
                                (*session->ref)(session);
 
                        write_unlock_bh(&tunnel->hlist_lock);
 
-                       if (tunnel->version != L2TP_HDR_VER_2) {
-                               struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
-                               spin_lock_bh(&pn->l2tp_session_hlist_lock);
-                               hlist_del_init_rcu(&session->global_hlist);
-                               spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-                               synchronize_rcu();
-                       }
+                       __l2tp_session_unhash(session);
+                       l2tp_session_queue_purge(session);
 
                        if (session->session_close != NULL)
                                (*session->session_close)(session);
@@ -1360,6 +1330,8 @@ again:
                        if (session->deref != NULL)
                                (*session->deref)(session);
 
+                       l2tp_session_dec_refcount(session);
+
                        write_lock_bh(&tunnel->hlist_lock);
 
                        /* Now restart from the beginning of this hash
@@ -1372,6 +1344,17 @@ again:
        }
        write_unlock_bh(&tunnel->hlist_lock);
 }
+EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
+
+/* Tunnel socket destroy hook for UDP encapsulation */
+static void l2tp_udp_encap_destroy(struct sock *sk)
+{
+       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+       if (tunnel) {
+               l2tp_tunnel_closeall(tunnel);
+               sock_put(sk);
+       }
+}
 
 /* Really kill the tunnel.
  * Come here only when all sessions have been cleared from the tunnel.
@@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
                return;
 
        sock = sk->sk_socket;
-       BUG_ON(!sock);
 
-       /* If the tunnel socket was created directly by the kernel, use the
-        * sk_* API to release the socket now.  Otherwise go through the
-        * inet_* layer to shut the socket down, and let userspace close it.
+       /* If the tunnel socket was created by userspace, then go through the
+        * inet layer to shut the socket down, and let userspace close it.
+        * Otherwise, if we created the socket directly within the kernel, use
+        * the sk API to release it here.
         * In either case the tunnel resources are freed in the socket
         * destructor when the tunnel socket goes away.
         */
-       if (sock->file == NULL) {
-               kernel_sock_shutdown(sock, SHUT_RDWR);
-               sk_release_kernel(sk);
+       if (tunnel->fd >= 0) {
+               if (sock)
+                       inet_shutdown(sock, 2);
        } else {
-               inet_shutdown(sock, 2);
+               if (sock)
+                       kernel_sock_shutdown(sock, SHUT_RDWR);
+               sk_release_kernel(sk);
        }
 
        l2tp_tunnel_sock_put(sk);
@@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
                /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
                udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
                udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
+               udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
 #if IS_ENABLED(CONFIG_IPV6)
                if (sk->sk_family == PF_INET6)
                        udpv6_encap_enable();
@@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
+       l2tp_tunnel_closeall(tunnel);
        return (false == queue_work(l2tp_wq, &tunnel->del_work));
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
@@ -1731,62 +1718,71 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
  */
 void l2tp_session_free(struct l2tp_session *session)
 {
-       struct l2tp_tunnel *tunnel;
+       struct l2tp_tunnel *tunnel = session->tunnel;
 
        BUG_ON(atomic_read(&session->ref_count) != 0);
 
-       tunnel = session->tunnel;
-       if (tunnel != NULL) {
+       if (tunnel) {
                BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+               if (session->session_id != 0)
+                       atomic_dec(&l2tp_session_count);
+               sock_put(tunnel->sock);
+               session->tunnel = NULL;
+               l2tp_tunnel_dec_refcount(tunnel);
+       }
+
+       kfree(session);
 
-               /* Delete the session from the hash */
+       return;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_free);
+
+/* Remove an l2tp session from l2tp_core's hash lists.
+ * Provides a tidyup interface for pseudowire code which can't just route all
+ * shutdown via l2tp_session_delete and a pseudowire-specific session_close
+ * callback.
+ */
+void __l2tp_session_unhash(struct l2tp_session *session)
+{
+       struct l2tp_tunnel *tunnel = session->tunnel;
+
+       /* Remove the session from core hashes */
+       if (tunnel) {
+               /* Remove from the per-tunnel hash */
                write_lock_bh(&tunnel->hlist_lock);
                hlist_del_init(&session->hlist);
                write_unlock_bh(&tunnel->hlist_lock);
 
-               /* Unlink from the global hash if not L2TPv2 */
+               /* For L2TPv3 we have a per-net hash: remove from there, too */
                if (tunnel->version != L2TP_HDR_VER_2) {
                        struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
                        spin_lock_bh(&pn->l2tp_session_hlist_lock);
                        hlist_del_init_rcu(&session->global_hlist);
                        spin_unlock_bh(&pn->l2tp_session_hlist_lock);
                        synchronize_rcu();
                }
-
-               if (session->session_id != 0)
-                       atomic_dec(&l2tp_session_count);
-
-               sock_put(tunnel->sock);
-
-               /* This will delete the tunnel context if this
-                * is the last session on the tunnel.
-                */
-               session->tunnel = NULL;
-               l2tp_tunnel_dec_refcount(tunnel);
        }
-
-       kfree(session);
-
-       return;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_free);
+EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
 
 /* This function is used by the netlink SESSION_DELETE command and by
    pseudowire modules.
  */
 int l2tp_session_delete(struct l2tp_session *session)
 {
+       if (session->ref)
+               (*session->ref)(session);
+       __l2tp_session_unhash(session);
+       l2tp_session_queue_purge(session);
        if (session->session_close != NULL)
                (*session->session_close)(session);
-
+       if (session->deref)
+               (*session->deref)(session);
        l2tp_session_dec_refcount(session);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(l2tp_session_delete);
 
-
 /* We come here whenever a session's send_seq, cookie_len or
  * l2specific_len parameters are set.
  */
index 8eb8f1d47f3ac2d6ce32ae287e120a93aa931b40..485a490fd990eeb91a3577e65844c17d1350f720 100644 (file)
@@ -36,16 +36,15 @@ enum {
 struct sk_buff;
 
 struct l2tp_stats {
-       u64                     tx_packets;
-       u64                     tx_bytes;
-       u64                     tx_errors;
-       u64                     rx_packets;
-       u64                     rx_bytes;
-       u64                     rx_seq_discards;
-       u64                     rx_oos_packets;
-       u64                     rx_errors;
-       u64                     rx_cookie_discards;
-       struct u64_stats_sync   syncp;
+       atomic_long_t           tx_packets;
+       atomic_long_t           tx_bytes;
+       atomic_long_t           tx_errors;
+       atomic_long_t           rx_packets;
+       atomic_long_t           rx_bytes;
+       atomic_long_t           rx_seq_discards;
+       atomic_long_t           rx_oos_packets;
+       atomic_long_t           rx_errors;
+       atomic_long_t           rx_cookie_discards;
 };
 
 struct l2tp_tunnel;
@@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
 extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
 
 extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
+extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
 extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
 extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
+extern void __l2tp_session_unhash(struct l2tp_session *session);
 extern int l2tp_session_delete(struct l2tp_session *session);
 extern void l2tp_session_free(struct l2tp_session *session);
 extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
+extern int l2tp_session_queue_purge(struct l2tp_session *session);
 extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
 
 extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
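Moving the counters to atomic_long_t removes the u64_stats_sync critical sections: writers do plain atomic increments and readers issue one load per counter (on 32-bit hosts the counters also narrow to 32 bits). A rough userspace analogue using C11 atomics (illustrative, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

struct stats {
	atomic_long rx_packets;
	atomic_long rx_bytes;
};

static void rx_account(struct stats *s, long bytes)
{
	/* Lock-free updates, analogous to atomic_long_inc()/atomic_long_add(). */
	atomic_fetch_add_explicit(&s->rx_packets, 1, memory_order_relaxed);
	atomic_fetch_add_explicit(&s->rx_bytes, bytes, memory_order_relaxed);
}

int main(void)
{
	static struct stats s;   /* zero-initialised */

	rx_account(&s, 1500);
	rx_account(&s, 64);
	printf("packets=%ld bytes=%ld\n",
	       atomic_load(&s.rx_packets), atomic_load(&s.rx_bytes));
	return 0;
}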
index c3813bc84552cebc56691cae4eae3097b29d2160..072d7202e182ffa2125fe7de9c54c584626fdeb0 100644 (file)
@@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
                   tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
                   atomic_read(&tunnel->ref_count));
 
-       seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+       seq_printf(m, " %08x tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
                   tunnel->debug,
-                  (unsigned long long)tunnel->stats.tx_packets,
-                  (unsigned long long)tunnel->stats.tx_bytes,
-                  (unsigned long long)tunnel->stats.tx_errors,
-                  (unsigned long long)tunnel->stats.rx_packets,
-                  (unsigned long long)tunnel->stats.rx_bytes,
-                  (unsigned long long)tunnel->stats.rx_errors);
+                  atomic_long_read(&tunnel->stats.tx_packets),
+                  atomic_long_read(&tunnel->stats.tx_bytes),
+                  atomic_long_read(&tunnel->stats.tx_errors),
+                  atomic_long_read(&tunnel->stats.rx_packets),
+                  atomic_long_read(&tunnel->stats.rx_bytes),
+                  atomic_long_read(&tunnel->stats.rx_errors));
 
        if (tunnel->show != NULL)
                tunnel->show(m, tunnel);
@@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
                seq_printf(m, "\n");
        }
 
-       seq_printf(m, "   %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+       seq_printf(m, "   %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
                   session->nr, session->ns,
-                  (unsigned long long)session->stats.tx_packets,
-                  (unsigned long long)session->stats.tx_bytes,
-                  (unsigned long long)session->stats.tx_errors,
-                  (unsigned long long)session->stats.rx_packets,
-                  (unsigned long long)session->stats.rx_bytes,
-                  (unsigned long long)session->stats.rx_errors);
+                  atomic_long_read(&session->stats.tx_packets),
+                  atomic_long_read(&session->stats.tx_bytes),
+                  atomic_long_read(&session->stats.tx_errors),
+                  atomic_long_read(&session->stats.rx_packets),
+                  atomic_long_read(&session->stats.rx_bytes),
+                  atomic_long_read(&session->stats.rx_errors));
 
        if (session->show != NULL)
                session->show(m, session);
index 7f41b7051269539efa0780b618be0b60e7788e76..571db8dd2292a7c5b1f2e940073edce370b13699 100644 (file)
@@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
 static void l2tp_ip_destroy_sock(struct sock *sk)
 {
        struct sk_buff *skb;
+       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
 
        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);
 
+       if (tunnel) {
+               l2tp_tunnel_closeall(tunnel);
+               sock_put(sk);
+       }
+
        sk_refcnt_debug_dec(sk);
 }
 
index 41f2f8126ebc720933638f67eb800f17797987e7..c74f5a91ff6a3213209c23ff488f0b475a552857 100644 (file)
@@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
 
 static void l2tp_ip6_destroy_sock(struct sock *sk)
 {
+       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+
        lock_sock(sk);
        ip6_flush_pending_frames(sk);
        release_sock(sk);
 
+       if (tunnel) {
+               l2tp_tunnel_closeall(tunnel);
+               sock_put(sk);
+       }
+
        inet6_destroy_sock(sk);
 }
 
index c1bab22db85e79765581079468a36231e5f3d608..0825ff26e113f3e2acdbbe2b88bd0f4fbfd92410 100644 (file)
@@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
 #if IS_ENABLED(CONFIG_IPV6)
        struct ipv6_pinfo *np = NULL;
 #endif
-       struct l2tp_stats stats;
-       unsigned int start;
 
        hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
                          L2TP_CMD_TUNNEL_GET);
@@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
        if (nest == NULL)
                goto nla_put_failure;
 
-       do {
-               start = u64_stats_fetch_begin(&tunnel->stats.syncp);
-               stats.tx_packets = tunnel->stats.tx_packets;
-               stats.tx_bytes = tunnel->stats.tx_bytes;
-               stats.tx_errors = tunnel->stats.tx_errors;
-               stats.rx_packets = tunnel->stats.rx_packets;
-               stats.rx_bytes = tunnel->stats.rx_bytes;
-               stats.rx_errors = tunnel->stats.rx_errors;
-               stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
-               stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
-       } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));
-
-       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
+                   atomic_long_read(&tunnel->stats.tx_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
+                   atomic_long_read(&tunnel->stats.tx_bytes)) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
+                   atomic_long_read(&tunnel->stats.tx_errors)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
+                   atomic_long_read(&tunnel->stats.rx_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
+                   atomic_long_read(&tunnel->stats.rx_bytes)) ||
            nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-                       stats.rx_seq_discards) ||
+                   atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
            nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-                       stats.rx_oos_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+                   atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
+                   atomic_long_read(&tunnel->stats.rx_errors)))
                goto nla_put_failure;
        nla_nest_end(skb, nest);
 
@@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
        struct nlattr *nest;
        struct l2tp_tunnel *tunnel = session->tunnel;
        struct sock *sk = NULL;
-       struct l2tp_stats stats;
-       unsigned int start;
 
        sk = tunnel->sock;
 
@@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
        if (nest == NULL)
                goto nla_put_failure;
 
-       do {
-               start = u64_stats_fetch_begin(&session->stats.syncp);
-               stats.tx_packets = session->stats.tx_packets;
-               stats.tx_bytes = session->stats.tx_bytes;
-               stats.tx_errors = session->stats.tx_errors;
-               stats.rx_packets = session->stats.rx_packets;
-               stats.rx_bytes = session->stats.rx_bytes;
-               stats.rx_errors = session->stats.rx_errors;
-               stats.rx_seq_discards = session->stats.rx_seq_discards;
-               stats.rx_oos_packets = session->stats.rx_oos_packets;
-       } while (u64_stats_fetch_retry(&session->stats.syncp, start));
-
-       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
+               atomic_long_read(&session->stats.tx_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
+               atomic_long_read(&session->stats.tx_bytes)) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
+               atomic_long_read(&session->stats.tx_errors)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
+               atomic_long_read(&session->stats.rx_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
+               atomic_long_read(&session->stats.rx_bytes)) ||
            nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-                       stats.rx_seq_discards) ||
+               atomic_long_read(&session->stats.rx_seq_discards)) ||
            nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-                       stats.rx_oos_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+               atomic_long_read(&session->stats.rx_oos_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
+               atomic_long_read(&session->stats.rx_errors)))
                goto nla_put_failure;
        nla_nest_end(skb, nest);
 
index 6a53371dba1f1b357f9cd4f34b7af166ed324eab..637a341c1e2d1a466c68efcb50d7fc201198a15b 100644 (file)
@@ -97,6 +97,7 @@
 #include <net/ip.h>
 #include <net/udp.h>
 #include <net/xfrm.h>
+#include <net/inet_common.h>
 
 #include <asm/byteorder.h>
 #include <linux/atomic.h>
@@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
                          session->name);
 
                /* Not bound. Nothing we can do, so discard. */
-               session->stats.rx_errors++;
+               atomic_long_inc(&session->stats.rx_errors);
                kfree_skb(skb);
        }
 
@@ -447,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 {
        struct pppol2tp_session *ps = l2tp_session_priv(session);
        struct sock *sk = ps->sock;
-       struct sk_buff *skb;
+       struct socket *sock = sk->sk_socket;
 
        BUG_ON(session->magic != L2TP_SESSION_MAGIC);
 
-       if (session->session_id == 0)
-               goto out;
-
-       if (sk != NULL) {
-               lock_sock(sk);
-
-               if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
-                       pppox_unbind_sock(sk);
-                       sk->sk_state = PPPOX_DEAD;
-                       sk->sk_state_change(sk);
-               }
-
-               /* Purge any queued data */
-               skb_queue_purge(&sk->sk_receive_queue);
-               skb_queue_purge(&sk->sk_write_queue);
-               while ((skb = skb_dequeue(&session->reorder_q))) {
-                       kfree_skb(skb);
-                       sock_put(sk);
-               }
 
-               release_sock(sk);
+       if (sock) {
+               inet_shutdown(sock, 2);
+               /* Don't let the session go away before our socket does */
+               l2tp_session_inc_refcount(session);
        }
-
-out:
        return;
 }
 
@@ -483,19 +466,12 @@ out:
  */
 static void pppol2tp_session_destruct(struct sock *sk)
 {
-       struct l2tp_session *session;
-
-       if (sk->sk_user_data != NULL) {
-               session = sk->sk_user_data;
-               if (session == NULL)
-                       goto out;
-
+       struct l2tp_session *session = sk->sk_user_data;
+       if (session) {
                sk->sk_user_data = NULL;
                BUG_ON(session->magic != L2TP_SESSION_MAGIC);
                l2tp_session_dec_refcount(session);
        }
-
-out:
        return;
 }
 
@@ -525,16 +501,13 @@ static int pppol2tp_release(struct socket *sock)
        session = pppol2tp_sock_to_session(sk);
 
        /* Purge any queued data */
-       skb_queue_purge(&sk->sk_receive_queue);
-       skb_queue_purge(&sk->sk_write_queue);
        if (session != NULL) {
-               struct sk_buff *skb;
-               while ((skb = skb_dequeue(&session->reorder_q))) {
-                       kfree_skb(skb);
-                       sock_put(sk);
-               }
+               __l2tp_session_unhash(session);
+               l2tp_session_queue_purge(session);
                sock_put(sk);
        }
+       skb_queue_purge(&sk->sk_receive_queue);
+       skb_queue_purge(&sk->sk_write_queue);
 
        release_sock(sk);
 
@@ -880,18 +853,6 @@ out:
        return error;
 }
 
-/* Called when deleting sessions via the netlink interface.
- */
-static int pppol2tp_session_delete(struct l2tp_session *session)
-{
-       struct pppol2tp_session *ps = l2tp_session_priv(session);
-
-       if (ps->sock == NULL)
-               l2tp_session_dec_refcount(session);
-
-       return 0;
-}
-
 #endif /* CONFIG_L2TP_V3 */
 
 /* getname() support.
@@ -1025,14 +986,14 @@ end:
 static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
                                struct l2tp_stats *stats)
 {
-       dest->tx_packets = stats->tx_packets;
-       dest->tx_bytes = stats->tx_bytes;
-       dest->tx_errors = stats->tx_errors;
-       dest->rx_packets = stats->rx_packets;
-       dest->rx_bytes = stats->rx_bytes;
-       dest->rx_seq_discards = stats->rx_seq_discards;
-       dest->rx_oos_packets = stats->rx_oos_packets;
-       dest->rx_errors = stats->rx_errors;
+       dest->tx_packets = atomic_long_read(&stats->tx_packets);
+       dest->tx_bytes = atomic_long_read(&stats->tx_bytes);
+       dest->tx_errors = atomic_long_read(&stats->tx_errors);
+       dest->rx_packets = atomic_long_read(&stats->rx_packets);
+       dest->rx_bytes = atomic_long_read(&stats->rx_bytes);
+       dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards);
+       dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets);
+       dest->rx_errors = atomic_long_read(&stats->rx_errors);
 }
 
 /* Session ioctl helper.
@@ -1666,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
                   tunnel->name,
                   (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
                   atomic_read(&tunnel->ref_count) - 1);
-       seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
+       seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
                   tunnel->debug,
-                  (unsigned long long)tunnel->stats.tx_packets,
-                  (unsigned long long)tunnel->stats.tx_bytes,
-                  (unsigned long long)tunnel->stats.tx_errors,
-                  (unsigned long long)tunnel->stats.rx_packets,
-                  (unsigned long long)tunnel->stats.rx_bytes,
-                  (unsigned long long)tunnel->stats.rx_errors);
+                  atomic_long_read(&tunnel->stats.tx_packets),
+                  atomic_long_read(&tunnel->stats.tx_bytes),
+                  atomic_long_read(&tunnel->stats.tx_errors),
+                  atomic_long_read(&tunnel->stats.rx_packets),
+                  atomic_long_read(&tunnel->stats.rx_bytes),
+                  atomic_long_read(&tunnel->stats.rx_errors));
 }
 
 static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
@@ -1708,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
                   session->lns_mode ? "LNS" : "LAC",
                   session->debug,
                   jiffies_to_msecs(session->reorder_timeout));
-       seq_printf(m, "   %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
+       seq_printf(m, "   %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n",
                   session->nr, session->ns,
-                  (unsigned long long)session->stats.tx_packets,
-                  (unsigned long long)session->stats.tx_bytes,
-                  (unsigned long long)session->stats.tx_errors,
-                  (unsigned long long)session->stats.rx_packets,
-                  (unsigned long long)session->stats.rx_bytes,
-                  (unsigned long long)session->stats.rx_errors);
+                  atomic_long_read(&session->stats.tx_packets),
+                  atomic_long_read(&session->stats.tx_bytes),
+                  atomic_long_read(&session->stats.tx_errors),
+                  atomic_long_read(&session->stats.rx_packets),
+                  atomic_long_read(&session->stats.rx_bytes),
+                  atomic_long_read(&session->stats.rx_errors));
 
        if (po)
                seq_printf(m, "   interface %s\n", ppp_dev_name(&po->chan));
@@ -1839,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = {
 
 static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
        .session_create = pppol2tp_session_create,
-       .session_delete = pppol2tp_session_delete,
+       .session_delete = l2tp_session_delete,
 };
 
 #endif /* CONFIG_L2TP_V3 */
index baaa8608e52de8d9d504f14a254bb7e6631b5f2e..3bfe2612c8c2e64f19a0843875b470eb72c7325b 100644 (file)
@@ -349,21 +349,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
 static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata;
-       int ret = 0;
+       int ret;
 
        if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
                return 0;
 
-       mutex_lock(&local->iflist_mtx);
+       ASSERT_RTNL();
 
        if (local->monitor_sdata)
-               goto out_unlock;
+               return 0;
 
        sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
-       if (!sdata) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       if (!sdata)
+               return -ENOMEM;
 
        /* set up data */
        sdata->local = local;
@@ -377,13 +375,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
        if (WARN_ON(ret)) {
                /* ok .. stupid driver, it asked for this! */
                kfree(sdata);
-               goto out_unlock;
+               return ret;
        }
 
        ret = ieee80211_check_queues(sdata);
        if (ret) {
                kfree(sdata);
-               goto out_unlock;
+               return ret;
        }
 
        ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
@@ -391,13 +389,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
        if (ret) {
                drv_remove_interface(local, sdata);
                kfree(sdata);
-               goto out_unlock;
+               return ret;
        }
 
+       mutex_lock(&local->iflist_mtx);
        rcu_assign_pointer(local->monitor_sdata, sdata);
- out_unlock:
        mutex_unlock(&local->iflist_mtx);
-       return ret;
+
+       return 0;
 }
 
 static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
@@ -407,14 +406,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
        if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
                return;
 
+       ASSERT_RTNL();
+
        mutex_lock(&local->iflist_mtx);
 
        sdata = rcu_dereference_protected(local->monitor_sdata,
                                          lockdep_is_held(&local->iflist_mtx));
-       if (!sdata)
-               goto out_unlock;
+       if (!sdata) {
+               mutex_unlock(&local->iflist_mtx);
+               return;
+       }
 
        rcu_assign_pointer(local->monitor_sdata, NULL);
+       mutex_unlock(&local->iflist_mtx);
+
        synchronize_net();
 
        ieee80211_vif_release_channel(sdata);
@@ -422,8 +427,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
        drv_remove_interface(local, sdata);
 
        kfree(sdata);
- out_unlock:
-       mutex_unlock(&local->iflist_mtx);
 }
 
 /*
index 29ce2aa87e7b60fdd344f7fd8acb1148cbc477fd..4749b3858695e6c2aeae983146d6a055be562a28 100644 (file)
@@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
 
        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list)
-               if (ieee80211_vif_is_mesh(&sdata->vif))
+               if (ieee80211_vif_is_mesh(&sdata->vif) &&
+                   ieee80211_sdata_running(sdata))
                        ieee80211_queue_work(&local->hw, &sdata->work);
        rcu_read_unlock();
 }
index 141577412d8407fc8b18ad354a34ad8de105f154..82cc30318a86f66c4a8f23341ea6ed3f6b62d578 100644 (file)
@@ -3608,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
 
        /* Restart STA timers */
        rcu_read_lock();
-       list_for_each_entry_rcu(sdata, &local->interfaces, list)
-               ieee80211_restart_sta_timer(sdata);
+       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+               if (ieee80211_sdata_running(sdata))
+                       ieee80211_restart_sta_timer(sdata);
+       }
        rcu_read_unlock();
 }
 
index bb73ed2d20b90e8ece75bdd41a331fecb9c9d7ac..c6844ad080beee0123969c3aca15b7ca349eddda 100644 (file)
@@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
 
                memset(nskb->cb, 0, sizeof(nskb->cb));
 
-               ieee80211_tx_skb(rx->sdata, nskb);
+               if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
+
+                       info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
+                                     IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
+                                     IEEE80211_TX_CTL_NO_CCK_RATE;
+                       if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+                               info->hw_queue =
+                                       local->hw.offchannel_tx_hw_queue;
+               }
+
+               __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
+                                           status->band);
        }
        dev_kfree_skb(rx->skb);
        return RX_QUEUED;
index a79ce820cb50cf01e5cff62a47b974332c3fe8d6..238a0cca320e621dad86cbca1830e4867459cb6d 100644 (file)
@@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
        int ret, i;
+       bool have_key = false;
 
        might_sleep();
 
@@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
        list_del_rcu(&sta->list);
 
        mutex_lock(&local->key_mtx);
-       for (i = 0; i < NUM_DEFAULT_KEYS; i++)
+       for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
                __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
-       if (sta->ptk)
+               have_key = true;
+       }
+       if (sta->ptk) {
                __ieee80211_key_free(key_mtx_dereference(local, sta->ptk));
+               have_key = true;
+       }
        mutex_unlock(&local->key_mtx);
 
+       if (!have_key)
+               synchronize_net();
+
        sta->dead = true;
 
        local->num_sta--;
index 47edf5a40a5939d2401dc11baa89142b9bcd64b6..61f49d241712a77797829a294ee94fc10161306e 100644 (file)
@@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
                        skb_reset_network_header(skb);
                        IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
                                &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
-                       rcu_read_lock();
                        ipv4_update_pmtu(skb, dev_net(skb->dev),
                                         mtu, 0, 0, 0, 0);
-                       rcu_read_unlock();
                        /* Client uses PMTUD? */
                        if (!(cih->frag_off & htons(IP_DF)))
                                goto ignore_ipip;
@@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        }
        /* ipvs enabled in this netns ? */
        net = skb_net(skb);
-       if (!net_ipvs(net)->enable)
+       ipvs = net_ipvs(net);
+       if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
                return NF_ACCEPT;
 
        ip_vs_fill_iph_skb(af, skb, &iph);
@@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        }
 
        IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-       ipvs = net_ipvs(net);
        /* Check the server status */
        if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                /* the destination server is not available */
@@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
 {
        int r;
        struct net *net;
+       struct netns_ipvs *ipvs;
 
        if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
                return NF_ACCEPT;
 
        /* ipvs enabled in this netns ? */
        net = skb_net(skb);
-       if (!net_ipvs(net)->enable)
+       ipvs = net_ipvs(net);
+       if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
                return NF_ACCEPT;
 
        return ip_vs_in_icmp(skb, &r, hooknum);
@@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
 {
        int r;
        struct net *net;
+       struct netns_ipvs *ipvs;
        struct ip_vs_iphdr iphdr;
 
        ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
@@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
 
        /* ipvs enabled in this netns ? */
        net = skb_net(skb);
-       if (!net_ipvs(net)->enable)
+       ipvs = net_ipvs(net);
+       if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
                return NF_ACCEPT;
 
        return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
index c68198bf9128abfb695571faed1e0390b036a4bb..9e2d1cccd1eb4c647b2ece3b2c3985edc0b5b32d 100644 (file)
@@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "backup_only",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #ifdef CONFIG_IP_VS_DEBUG
        {
                .procname       = "debug_level",
@@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
        tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
        ipvs->sysctl_pmtu_disc = 1;
        tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
+       tbl[idx++].data = &ipvs->sysctl_backup_only;
 
 
        ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
index ae8ec6f2768888eec1fb8b8c68b4242eff033525..cd1d7298f7ba779de3e6654b85fafaf7cec58f8e 100644 (file)
@@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
        sctp_chunkhdr_t _sctpch, *sch;
        unsigned char chunk_type;
        int event, next_state;
-       int ihl;
+       int ihl, cofs;
 
 #ifdef CONFIG_IP_VS_IPV6
        ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
@@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
        ihl = ip_hdrlen(skb);
 #endif
 
-       sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
-                               sizeof(_sctpch), &_sctpch);
+       cofs = ihl + sizeof(sctp_sctphdr_t);
+       sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch);
        if (sch == NULL)
                return;
 
@@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
         */
        if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
            (sch->type == SCTP_CID_COOKIE_ACK)) {
-               sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) +
-                               sch->length), sizeof(_sctpch), &_sctpch);
-               if (sch) {
-                       if (sch->type == SCTP_CID_ABORT)
+               int clen = ntohs(sch->length);
+
+               if (clen >= sizeof(sctp_chunkhdr_t)) {
+                       sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4),
+                                                sizeof(_sctpch), &_sctpch);
+                       if (sch && sch->type == SCTP_CID_ABORT)
                                chunk_type = sch->type;
                }
        }
index 432f95780003f2e36a4d0fb08aaa10b900e052ff..ba65b2041eb4b7f747f7df605a049e7922b32a55 100644 (file)
@@ -969,6 +969,10 @@ static int __init nf_conntrack_proto_dccp_init(void)
 {
        int ret;
 
+       ret = register_pernet_subsys(&dccp_net_ops);
+       if (ret < 0)
+               goto out_pernet;
+
        ret = nf_ct_l4proto_register(&dccp_proto4);
        if (ret < 0)
                goto out_dccp4;
@@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void)
        if (ret < 0)
                goto out_dccp6;
 
-       ret = register_pernet_subsys(&dccp_net_ops);
-       if (ret < 0)
-               goto out_pernet;
-
        return 0;
-out_pernet:
-       nf_ct_l4proto_unregister(&dccp_proto6);
 out_dccp6:
        nf_ct_l4proto_unregister(&dccp_proto4);
 out_dccp4:
+       unregister_pernet_subsys(&dccp_net_ops);
+out_pernet:
        return ret;
 }
 
index bd7d01d9c7e77d0e8a3a1e50116f854bb4925e14..155ce9f8a0db047d0e100b1d0283b8b36cd6e082 100644 (file)
@@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void)
 {
        int ret;
 
-       ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
-       if (ret < 0)
-               goto out_gre4;
-
        ret = register_pernet_subsys(&proto_gre_net_ops);
        if (ret < 0)
                goto out_pernet;
 
+       ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
+       if (ret < 0)
+               goto out_gre4;
+
        return 0;
-out_pernet:
-       nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4);
 out_gre4:
+       unregister_pernet_subsys(&proto_gre_net_ops);
+out_pernet:
        return ret;
 }
 
index 480f616d59361e0fff4abf182dc9f35974606187..ec83536def9ab89aca0678ac7b7e02a5509b4f01 100644 (file)
@@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void)
 {
        int ret;
 
+       ret = register_pernet_subsys(&sctp_net_ops);
+       if (ret < 0)
+               goto out_pernet;
+
        ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4);
        if (ret < 0)
                goto out_sctp4;
@@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void)
        if (ret < 0)
                goto out_sctp6;
 
-       ret = register_pernet_subsys(&sctp_net_ops);
-       if (ret < 0)
-               goto out_pernet;
-
        return 0;
-out_pernet:
-       nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
 out_sctp6:
        nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
 out_sctp4:
+       unregister_pernet_subsys(&sctp_net_ops);
+out_pernet:
        return ret;
 }
 
index 157489581c313b02456b18d0e3e886746a60671f..ca969f6273f77a58851858357c25285b9f22e10e 100644 (file)
@@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void)
 {
        int ret;
 
+       ret = register_pernet_subsys(&udplite_net_ops);
+       if (ret < 0)
+               goto out_pernet;
+
        ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4);
        if (ret < 0)
                goto out_udplite4;
@@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void)
        if (ret < 0)
                goto out_udplite6;
 
-       ret = register_pernet_subsys(&udplite_net_ops);
-       if (ret < 0)
-               goto out_pernet;
-
        return 0;
-out_pernet:
-       nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
 out_udplite6:
        nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
 out_udplite4:
+       unregister_pernet_subsys(&udplite_net_ops);
+out_pernet:
        return ret;
 }
 
index 858fd52c10408393a901a9afb686226638f32f07..1cb48540f86a96df1e1b26371e5d75589861562d 100644 (file)
@@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid)
        inst->queue_num = queue_num;
        inst->peer_portid = portid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
-       inst->copy_range = 0xfffff;
+       inst->copy_range = 0xffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);
index f2aabb6f410582439604d7a3c0f379a5cd798621..5a55be3f17a54aa2acac3ce109c412c86c652b36 100644 (file)
@@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family,
        int err = 0;
 
        BUG_ON(grp->name[0] == '\0');
+       BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);
 
        genl_lock();
 
index 13aa47aa2ffb48ea25990dce3762b83e775d7bcf..1bc210ffcba2a524750b3382d444a87db2c08c2c 100644 (file)
@@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch)
                cbq_update(q);
                if ((incr -= incr2) < 0)
                        incr = 0;
+               q->now += incr;
+       } else {
+               if (now > q->now)
+                       q->now = now;
        }
-       q->now += incr;
        q->now_rt = now;
 
        for (;;) {
index 4e606fcb2534929a2470e807816ac1089d81b7b2..55786283a3dfe613ee6deac5756b4726a66925ee 100644 (file)
@@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
-       if (++sch->q.qlen < sch->limit)
+       if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;
 
        q->drop_overlimit++;
index ffad48109a228c66ccbd33124525dee83f9158df..eac7e0ee23c18708354e3cef8c237d289b81235f 100644 (file)
@@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
        u64 mult;
        int shift;
 
-       r->rate_bps = rate << 3;
+       r->rate_bps = (u64)rate << 3;
        r->shift = 0;
        r->mult = 1;
        /*
index fb20f25ddec9b70c805ad65d675441d02b62055b..f8529fc8e54275c5b7b9809f0219f20608ffb472 100644 (file)
@@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
        task->tk_waitqueue = queue;
        queue->qlen++;
+       /* barrier matches the read in rpc_wake_up_task_queue_locked() */
+       smp_wmb();
        rpc_set_queued(task);
 
        dprintk("RPC: %5u added to queue %p \"%s\"\n",
@@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
  */
 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-       if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
-               __rpc_do_wake_up_task(queue, task);
+       if (RPC_IS_QUEUED(task)) {
+               smp_rmb();
+               if (task->tk_waitqueue == queue)
+                       __rpc_do_wake_up_task(queue, task);
+       }
 }
 
 /*
index 51be64f163ec166812660db8de26105b1c92eb46..971282b6f6a38fd056dd65d127b85f044b7c6041 100644 (file)
@@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk)
 #endif
 }
 
-static int unix_release_sock(struct sock *sk, int embrion)
+static void unix_release_sock(struct sock *sk, int embrion)
 {
        struct unix_sock *u = unix_sk(sk);
        struct path path;
@@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion)
 
        if (unix_tot_inflight)
                unix_gc();              /* Garbage collect fds */
-
-       return 0;
 }
 
 static void init_peercred(struct sock *sk)
@@ -699,9 +697,10 @@ static int unix_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       unix_release_sock(sk, 0);
        sock->sk = NULL;
 
-       return unix_release_sock(sk, 0);
+       return 0;
 }
 
 static int unix_autobind(struct socket *sock)
@@ -1413,8 +1412,8 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
        if (UNIXCB(skb).cred)
                return;
        if (test_bit(SOCK_PASSCRED, &sock->flags) ||
-           !other->sk_socket ||
-           test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+           (other->sk_socket &&
+           test_bit(SOCK_PASSCRED, &other->sk_socket->flags))) {
                UNIXCB(skb).pid  = get_pid(task_tgid(current));
                UNIXCB(skb).cred = get_current_cred();
        }
index ca511c4f388a56cf1fdaf110d31aff7d30b5d0fb..d8079daf1bdeaed0f341aaac673c8b9e1da7d96b 100644 (file)
@@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
        struct vsock_sock *vsk;
 
        list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
-               if (vsock_addr_equals_addr_any(addr, &vsk->local_addr))
+               if (addr->svm_port == vsk->local_addr.svm_port)
                        return sk_vsock(vsk);
 
        return NULL;
@@ -220,8 +220,8 @@ static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
 
        list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
                            connected_table) {
-               if (vsock_addr_equals_addr(src, &vsk->remote_addr)
-                   && vsock_addr_equals_addr(dst, &vsk->local_addr)) {
+               if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
+                   dst->svm_port == vsk->local_addr.svm_port) {
                        return sk_vsock(vsk);
                }
        }
index a70ace83a1531232a2f11afc63c8b4ab1c4ea918..1f6508e249ae5934ce9257ebe5cc3e5941300ffa 100644 (file)
@@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending(
        struct vsock_sock *vlistener;
        struct vsock_sock *vpending;
        struct sock *pending;
+       struct sockaddr_vm src;
+
+       vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
 
        vlistener = vsock_sk(listener);
 
        list_for_each_entry(vpending, &vlistener->pending_links,
                            pending_links) {
-               struct sockaddr_vm src;
-               struct sockaddr_vm dst;
-
-               vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
-               vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
-
                if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
-                   vsock_addr_equals_addr(&dst, &vpending->local_addr)) {
+                   pkt->dst_port == vpending->local_addr.svm_port) {
                        pending = sk_vsock(vpending);
                        sock_hold(pending);
                        goto found;
@@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
         */
        bh_lock_sock(sk);
 
-       if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED)
-               vmci_trans(vsk)->notify_ops->handle_notify_pkt(
-                               sk, pkt, true, &dst, &src,
-                               &bh_process_pkt);
+       if (!sock_owned_by_user(sk)) {
+               /* The local context ID may be out of date, update it. */
+               vsk->local_addr.svm_cid = dst.svm_cid;
+
+               if (sk->sk_state == SS_CONNECTED)
+                       vmci_trans(vsk)->notify_ops->handle_notify_pkt(
+                                       sk, pkt, true, &dst, &src,
+                                       &bh_process_pkt);
+       }
 
        bh_unlock_sock(sk);
 
@@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work)
 
        lock_sock(sk);
 
+       /* The local context ID may be out of date. */
+       vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;
+
        switch (sk->sk_state) {
        case SS_LISTEN:
                vmci_transport_recv_listen(sk, pkt);
@@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk,
        pending = vmci_transport_get_pending(sk, pkt);
        if (pending) {
                lock_sock(pending);
+
+               /* The local context ID may be out of date. */
+               vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;
+
                switch (pending->sk_state) {
                case SS_CONNECTING:
                        err = vmci_transport_recv_connecting_server(sk,
index b7df1aea7c59364b18f4e7a71b448909120a9b45..ec2611b4ea0ee4fc073926c9a33347ace8de9e4f 100644 (file)
@@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
 }
 EXPORT_SYMBOL_GPL(vsock_addr_equals_addr);
 
-bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
-                               const struct sockaddr_vm *other)
-{
-       return (addr->svm_cid == VMADDR_CID_ANY ||
-               other->svm_cid == VMADDR_CID_ANY ||
-               addr->svm_cid == other->svm_cid) &&
-              addr->svm_port == other->svm_port;
-}
-EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any);
-
 int vsock_addr_cast(const struct sockaddr *addr,
                    size_t len, struct sockaddr_vm **out_addr)
 {
index cdfbcefdf84300f71fd598a782a8de66d6c095fb..9ccd5316eac09ddfeb7a07f030a9f4f993da7de2 100644 (file)
@@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr);
 void vsock_addr_unbind(struct sockaddr_vm *addr);
 bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
                            const struct sockaddr_vm *other);
-bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
-                               const struct sockaddr_vm *other);
 int vsock_addr_cast(const struct sockaddr *addr, size_t len,
                    struct sockaddr_vm **out_addr);
 
index ea4155fe97334f91794dbb7a55f0dab28219c2a3..6ddf74f0ae1e5ace4346f3c0cf6b4f1ce13fa580 100644 (file)
@@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
        rdev_rfkill_poll(rdev);
 }
 
+void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
+                             struct wireless_dev *wdev)
+{
+       lockdep_assert_held(&rdev->devlist_mtx);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
+
+       if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE))
+               return;
+
+       if (!wdev->p2p_started)
+               return;
+
+       rdev_stop_p2p_device(rdev, wdev);
+       wdev->p2p_started = false;
+
+       rdev->opencount--;
+
+       if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+               bool busy = work_busy(&rdev->scan_done_wk);
+
+               /*
+                * If the work isn't pending or running (in which case it would
+                * be waiting for the lock we hold) the driver didn't properly
+                * cancel the scan when the interface was removed. In this case
+                * warn and leak the scan request object to not crash later.
+                */
+               WARN_ON(!busy);
+
+               rdev->scan_req->aborted = true;
+               ___cfg80211_scan_done(rdev, !busy);
+       }
+}
+
 static int cfg80211_rfkill_set_block(void *data, bool blocked)
 {
        struct cfg80211_registered_device *rdev = data;
@@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
                return 0;
 
        rtnl_lock();
-       mutex_lock(&rdev->devlist_mtx);
+
+       /* read-only iteration need not hold the devlist_mtx */
 
        list_for_each_entry(wdev, &rdev->wdev_list, list) {
                if (wdev->netdev) {
@@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
                /* otherwise, check iftype */
                switch (wdev->iftype) {
                case NL80211_IFTYPE_P2P_DEVICE:
-                       if (!wdev->p2p_started)
-                               break;
-                       rdev_stop_p2p_device(rdev, wdev);
-                       wdev->p2p_started = false;
-                       rdev->opencount--;
+                       /* but this requires it */
+                       mutex_lock(&rdev->devlist_mtx);
+                       mutex_lock(&rdev->sched_scan_mtx);
+                       cfg80211_stop_p2p_device(rdev, wdev);
+                       mutex_unlock(&rdev->sched_scan_mtx);
+                       mutex_unlock(&rdev->devlist_mtx);
                        break;
                default:
                        break;
                }
        }
 
-       mutex_unlock(&rdev->devlist_mtx);
        rtnl_unlock();
 
        return 0;
@@ -745,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work)
        wdev = container_of(work, struct wireless_dev, cleanup_work);
        rdev = wiphy_to_dev(wdev->wiphy);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
 
        if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
                rdev->scan_req->aborted = true;
                ___cfg80211_scan_done(rdev, true);
        }
 
-       cfg80211_unlock_rdev(rdev);
-
-       mutex_lock(&rdev->sched_scan_mtx);
-
        if (WARN_ON(rdev->sched_scan_req &&
                    rdev->sched_scan_req->dev == wdev->netdev)) {
                __cfg80211_stop_sched_scan(rdev, false);
@@ -781,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
                return;
 
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        list_del_rcu(&wdev->list);
        rdev->devlist_generation++;
 
        switch (wdev->iftype) {
        case NL80211_IFTYPE_P2P_DEVICE:
-               if (!wdev->p2p_started)
-                       break;
-               rdev_stop_p2p_device(rdev, wdev);
-               wdev->p2p_started = false;
-               rdev->opencount--;
+               cfg80211_stop_p2p_device(rdev, wdev);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
 }
 EXPORT_SYMBOL(cfg80211_unregister_wdev);
@@ -936,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                cfg80211_update_iface_num(rdev, wdev->iftype, 1);
                cfg80211_lock_rdev(rdev);
                mutex_lock(&rdev->devlist_mtx);
+               mutex_lock(&rdev->sched_scan_mtx);
                wdev_lock(wdev);
                switch (wdev->iftype) {
 #ifdef CONFIG_CFG80211_WEXT
@@ -967,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                        break;
                }
                wdev_unlock(wdev);
+               mutex_unlock(&rdev->sched_scan_mtx);
                rdev->opencount++;
                mutex_unlock(&rdev->devlist_mtx);
                cfg80211_unlock_rdev(rdev);
index 3aec0e429d8adbf9d44c7fbc5bb7fa503e5b9818..5845c2b37aa832b444906ad4fb0e4e74f06c3ee3 100644 (file)
@@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
 void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
                               enum nl80211_iftype iftype, int num);
 
+void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
+                             struct wireless_dev *wdev);
+
 #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
 
 #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
index d44ab216c0ecd8b01d4386edf62cd07c3f2071db..58e13a8c95f90a7b880a9d65530799fc02451f1e 100644 (file)
@@ -4702,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->scan)
                return -EOPNOTSUPP;
 
-       if (rdev->scan_req)
-               return -EBUSY;
+       mutex_lock(&rdev->sched_scan_mtx);
+       if (rdev->scan_req) {
+               err = -EBUSY;
+               goto unlock;
+       }
 
        if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
                n_channels = validate_scan_freqs(
                                info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
-               if (!n_channels)
-                       return -EINVAL;
+               if (!n_channels) {
+                       err = -EINVAL;
+                       goto unlock;
+               }
        } else {
                enum ieee80211_band band;
                n_channels = 0;
@@ -4723,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp)
                        n_ssids++;
 
-       if (n_ssids > wiphy->max_scan_ssids)
-               return -EINVAL;
+       if (n_ssids > wiphy->max_scan_ssids) {
+               err = -EINVAL;
+               goto unlock;
+       }
 
        if (info->attrs[NL80211_ATTR_IE])
                ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
        else
                ie_len = 0;
 
-       if (ie_len > wiphy->max_scan_ie_len)
-               return -EINVAL;
+       if (ie_len > wiphy->max_scan_ie_len) {
+               err = -EINVAL;
+               goto unlock;
+       }
 
        request = kzalloc(sizeof(*request)
                        + sizeof(*request->ssids) * n_ssids
                        + sizeof(*request->channels) * n_channels
                        + ie_len, GFP_KERNEL);
-       if (!request)
-               return -ENOMEM;
+       if (!request) {
+               err = -ENOMEM;
+               goto unlock;
+       }
 
        if (n_ssids)
                request->ssids = (void *)&request->channels[n_channels];
@@ -4876,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                kfree(request);
        }
 
+ unlock:
+       mutex_unlock(&rdev->sched_scan_mtx);
        return err;
 }
 
@@ -7749,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->stop_p2p_device)
                return -EOPNOTSUPP;
 
-       if (!wdev->p2p_started)
-               return 0;
-
-       rdev_stop_p2p_device(rdev, wdev);
-       wdev->p2p_started = false;
-
-       mutex_lock(&rdev->devlist_mtx);
-       rdev->opencount--;
-       mutex_unlock(&rdev->devlist_mtx);
-
-       if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
-               rdev->scan_req->aborted = true;
-               ___cfg80211_scan_done(rdev, true);
-       }
+       mutex_lock(&rdev->sched_scan_mtx);
+       cfg80211_stop_p2p_device(rdev, wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 
        return 0;
 }
@@ -8486,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
        struct nlattr *nest;
        int i;
 
-       ASSERT_RDEV_LOCK(rdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        if (WARN_ON(!req))
                return 0;
index 674aadca007982257b14b3c68abbafc39ee01461..fd99ea495b7e68cd9e5265542a6aa059d0ac7760 100644 (file)
@@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
        union iwreq_data wrqu;
 #endif
 
-       ASSERT_RDEV_LOCK(rdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        request = rdev->scan_req;
 
@@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk)
        rdev = container_of(wk, struct cfg80211_registered_device,
                            scan_done_wk);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
        ___cfg80211_scan_done(rdev, false);
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
@@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
        found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);
 
        if (found) {
-               found->pub.beacon_interval = tmp->pub.beacon_interval;
-               found->pub.signal = tmp->pub.signal;
-               found->pub.capability = tmp->pub.capability;
-               found->ts = tmp->ts;
-
                /* Update IEs */
                if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
                        const struct cfg80211_bss_ies *old;
@@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
                        if (found->pub.hidden_beacon_bss &&
                            !list_empty(&found->hidden_list)) {
+                               const struct cfg80211_bss_ies *f;
+
                                /*
                                 * The found BSS struct is one of the probe
                                 * response members of a group, but we're
@@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                                 * SSID to showing it, which is confusing so
                                 * drop this information.
                                 */
+
+                               f = rcu_access_pointer(tmp->pub.beacon_ies);
+                               kfree_rcu((struct cfg80211_bss_ies *)f,
+                                         rcu_head);
                                goto drop;
                        }
 
@@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                                kfree_rcu((struct cfg80211_bss_ies *)old,
                                          rcu_head);
                }
+
+               found->pub.beacon_interval = tmp->pub.beacon_interval;
+               found->pub.signal = tmp->pub.signal;
+               found->pub.capability = tmp->pub.capability;
+               found->ts = tmp->ts;
        } else {
                struct cfg80211_internal_bss *new;
                struct cfg80211_internal_bss *hidden;
@@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
        if (IS_ERR(rdev))
                return PTR_ERR(rdev);
 
+       mutex_lock(&rdev->sched_scan_mtx);
        if (rdev->scan_req) {
                err = -EBUSY;
                goto out;
@@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
                dev_hold(dev);
        }
  out:
+       mutex_unlock(&rdev->sched_scan_mtx);
        kfree(creq);
        cfg80211_unlock_rdev(rdev);
        return err;
index f432bd3755b19f0d865b1b2e9f2a132890f3b6a3..09d994d192ffa1c78b963933daad73e4240dfe4b 100644 (file)
@@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
        ASSERT_RTNL();
        ASSERT_RDEV_LOCK(rdev);
        ASSERT_WDEV_LOCK(wdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        if (rdev->scan_req)
                return -EBUSY;
@@ -320,11 +321,9 @@ void cfg80211_sme_scan_done(struct net_device *dev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
 
-       mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
        wdev_lock(wdev);
        __cfg80211_sme_scan_done(dev);
        wdev_unlock(wdev);
-       mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
 }
 
 void cfg80211_sme_rx_auth(struct net_device *dev,
@@ -924,9 +923,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
        int err;
 
        mutex_lock(&rdev->devlist_mtx);
+       /* might request scan - scan_mtx -> wdev_mtx dependency */
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(dev->ieee80211_ptr);
        err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL);
        wdev_unlock(dev->ieee80211_ptr);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
 
        return err;
index b7a531380e19d5bf879f2395eee1176a1abfac93..7586de77a2f8b57ebb9b832b76563ab66a59111a 100644 (file)
@@ -27,7 +27,8 @@
 #define WIPHY_PR_ARG   __entry->wiphy_name
 
 #define WDEV_ENTRY     __field(u32, id)
-#define WDEV_ASSIGN    (__entry->id) = (wdev ? wdev->identifier : 0)
+#define WDEV_ASSIGN    (__entry->id) = (!IS_ERR_OR_NULL(wdev)  \
+                                        ? wdev->identifier : 0)
 #define WDEV_PR_FMT    "wdev(%u)"
 #define WDEV_PR_ARG    (__entry->id)
 
@@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl,
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
-               WIPHY_ASSIGN;
+               NETDEV_ASSIGN;
                __entry->acl_policy = params->acl_policy;
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d",
index fb9622f6d99c4b6b6c87f8a8d8e2c88e4fb3705c..e79cb5c0655ad34ec0e8df85471e6c77ad856716 100644 (file)
@@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
 
        cfg80211_lock_rdev(rdev);
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
        if (wdev->sme_state != CFG80211_SME_IDLE) {
@@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
        cfg80211_unlock_rdev(rdev);
        return err;
@@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
 
        cfg80211_lock_rdev(rdev);
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
        err = 0;
@@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
        cfg80211_unlock_rdev(rdev);
        return err;
@@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
 
        cfg80211_lock_rdev(rdev);
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
        if (wdev->sme_state != CFG80211_SME_IDLE) {
@@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
        cfg80211_unlock_rdev(rdev);
        return err;
index 35754cc8a9e5b9c51cdaf52693128ee2098718f7..8dafe6d3c6e41ebd20e5d0347d4ab9b0e6326a34 100644 (file)
@@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
                x->xflags &= ~XFRM_TIME_DEFER;
 }
 
+static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
+{
+       u32 seq_diff, oseq_diff;
+       struct km_event c;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+       struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;
+
+       /* we send notify messages in case
+        *  1. we updated one of the sequence numbers, and the seqno difference
+        *     is at least x->replay_maxdiff, in this case we also update the
+        *     timeout of our timer function
+        *  2. if x->replay_maxage has elapsed since last update,
+        *     and there were changes
+        *
+        *  The state structure must be locked!
+        */
+
+       switch (event) {
+       case XFRM_REPLAY_UPDATE:
+               if (!x->replay_maxdiff)
+                       break;
+
+               if (replay_esn->seq_hi == preplay_esn->seq_hi)
+                       seq_diff = replay_esn->seq - preplay_esn->seq;
+               else
+                       seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
+
+               if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
+                       oseq_diff = replay_esn->oseq - preplay_esn->oseq;
+               else
+                       oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
+
+               if (seq_diff < x->replay_maxdiff &&
+                   oseq_diff < x->replay_maxdiff) {
+
+                       if (x->xflags & XFRM_TIME_DEFER)
+                               event = XFRM_REPLAY_TIMEOUT;
+                       else
+                               return;
+               }
+
+               break;
+
+       case XFRM_REPLAY_TIMEOUT:
+               if (memcmp(x->replay_esn, x->preplay_esn,
+                          xfrm_replay_state_esn_len(replay_esn)) == 0) {
+                       x->xflags |= XFRM_TIME_DEFER;
+                       return;
+               }
+
+               break;
+       }
+
+       memcpy(x->preplay_esn, x->replay_esn,
+              xfrm_replay_state_esn_len(replay_esn));
+       c.event = XFRM_MSG_NEWAE;
+       c.data.aevent = event;
+       km_state_notify(x, &c);
+
+       if (x->replay_maxage &&
+           !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
+               x->xflags &= ~XFRM_TIME_DEFER;
+}
+
 static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err = 0;
@@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = {
        .advance        = xfrm_replay_advance_esn,
        .check          = xfrm_replay_check_esn,
        .recheck        = xfrm_replay_recheck_esn,
-       .notify         = xfrm_replay_notify_bmp,
+       .notify         = xfrm_replay_notify_esn,
        .overflow       = xfrm_replay_overflow_esn,
 };
 
index 23414b93771f30ec82ccf76b6cfb49fbed27edef..13c88fbcf0371cc32340791e335eeb0b4758f875 100644 (file)
@@ -347,10 +347,8 @@ int yama_ptrace_traceme(struct task_struct *parent)
        /* Only disallow PTRACE_TRACEME on more aggressive settings. */
        switch (ptrace_scope) {
        case YAMA_SCOPE_CAPABILITY:
-               rcu_read_lock();
-               if (!ns_capable(__task_cred(parent)->user_ns, CAP_SYS_PTRACE))
+               if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
                        rc = -EPERM;
-               rcu_read_unlock();
                break;
        case YAMA_SCOPE_NO_ATTACH:
                rc = -EPERM;